clang-format-3.6 regenerated files in this PR
parent 206ebf0a5f
commit c425f34ad4
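
This is a formatting-only commit: clang-format joins else branches onto the closing brace, rewraps lines that run past the 80-column limit, re-aligns wrapped arguments, and drops a stray blank line and a space before a comma. A run along these lines reproduces it (a sketch, not the exact invocation from the PR: the file paths are inferred from the functions named in the hunk headers below, the clang-format-3.6 binary name follows Debian/Ubuntu packaging, and -style=file assumes the repository's checked-in .clang-format):

    # Reformat the touched files in place using the repo's style file
    # (assumed paths, run from the repository root).
    clang-format-3.6 -i -style=file \
        tensorflow/tools/graph_transforms/fold_batch_norms.cc \
        tensorflow/tools/graph_transforms/fold_batch_norms_test.cc \
        tensorflow/tools/graph_transforms/fold_old_batch_norms.cc \
        tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc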
--- a/tensorflow/tools/graph_transforms/fold_batch_norms.cc
+++ b/tensorflow/tools/graph_transforms/fold_batch_norms.cc
@@ -76,11 +76,10 @@ Status FoldBatchNorms(const GraphDef& input_graph_def,
         int64 weights_cols;
         if (conv_node.op() == "Conv2D") {
           weights_cols = weights.shape().dim_size(3);
-        }
-        else if (conv_node.op() == "DepthwiseConv2dNative") {
-          weights_cols = weights.shape().dim_size(2) * weights.shape().dim_size(3);
-        }
-        else {
+        } else if (conv_node.op() == "DepthwiseConv2dNative") {
+          weights_cols =
+              weights.shape().dim_size(2) * weights.shape().dim_size(3);
+        } else {
           weights_cols = weights.shape().dim_size(1);
         }
         if ((mul_values.shape().dims() != 1) ||
@@ -96,7 +95,8 @@ Status FoldBatchNorms(const GraphDef& input_graph_def,
         auto scaled_weights_vector = scaled_weights.flat<float>();
         for (int64 row = 0; row < weights_vector.dimension(0); ++row) {
           scaled_weights_vector(row) =
-              weights_vector(row) * mul_values.flat<float>()(row % weights_cols);
+              weights_vector(row) *
+              mul_values.flat<float>()(row % weights_cols);
         }
 
         // Construct the new nodes.
--- a/tensorflow/tools/graph_transforms/fold_batch_norms_test.cc
+++ b/tensorflow/tools/graph_transforms/fold_batch_norms_test.cc
@@ -104,8 +104,8 @@ class FoldBatchNormsTest : public ::testing::Test {
     Output weights_op =
         Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
 
-    Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op, weights_op,
-                                           {1, 1, 1, 1}, "VALID");
+    Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op,
+                                           weights_op, {1, 1, 1, 1}, "VALID");
 
     Tensor mul_values_data(DT_FLOAT, TensorShape({4}));
     test::FillValues<float>(&mul_values_data, {2.0f, 3.0f, 4.0f, 5.0f});
--- a/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
+++ b/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
@@ -32,9 +32,9 @@ Status ErrorIfNotVector(const Tensor& input, const string& input_name,
                         int expected_width) {
   if ((input.shape().dims() != 1) ||
       (input.shape().dim_size(0) != expected_width)) {
-    return errors::InvalidArgument(
-        input_name,
-        " input to batch norm has bad shape: ", input.shape().DebugString());
+    return errors::InvalidArgument(input_name,
+                                   " input to batch norm has bad shape: ",
+                                   input.shape().DebugString());
   }
   return Status::OK();
 }
@@ -119,11 +119,9 @@ Status FuseScaleOffsetToConvWeights(const std::vector<float>& scale_values,
   int64 weights_cols;
   if (conv_node.op() == "Conv2D") {
     weights_cols = weights.shape().dim_size(3);
-  }
-  else if (conv_node.op() == "DepthwiseConv2dNative") {
+  } else if (conv_node.op() == "DepthwiseConv2dNative") {
     weights_cols = weights.shape().dim_size(2) * weights.shape().dim_size(3);
-  }
-  else {
+  } else {
     weights_cols = weights.shape().dim_size(1);
   }
   CHECK_EQ(weights_cols, scale_values.size());
@@ -134,7 +132,7 @@ Status FuseScaleOffsetToConvWeights(const std::vector<float>& scale_values,
   auto scaled_weights_vector = scaled_weights.flat<float>();
   for (int64 row = 0; row < weights_vector.dimension(0); ++row) {
     scaled_weights_vector(row) =
-      weights_vector(row) * scale_values[row % weights_cols];
+        weights_vector(row) * scale_values[row % weights_cols];
   }
   // Figure out the remaining bias to add on.
   Tensor bias_offset(DT_FLOAT, {weights_cols});
@@ -193,7 +191,7 @@ Status FuseBatchNormWithConv(const NodeMatch& match,
 }
 
 Status FuseBatchNormWithBatchToSpace(const NodeMatch& match,
-                                    std::vector<NodeDef>* new_nodes) {
+                                     std::vector<NodeDef>* new_nodes) {
   // Calculate the scale and offset values to apply.
   std::vector<float> scale_values;
   std::vector<float> offset_values;
@@ -208,9 +206,8 @@ Status FuseBatchNormWithBatchToSpace(const NodeMatch& match,
   const NodeDef& conv_node = conv_node_match.node;
 
   string biasadd_name = conv_node.name() + "/biasadd";
-  TF_RETURN_IF_ERROR(
-      FuseScaleOffsetToConvWeights(scale_values, offset_values, conv_node_match,
-                                   biasadd_name , new_nodes));
+  TF_RETURN_IF_ERROR(FuseScaleOffsetToConvWeights(
+      scale_values, offset_values, conv_node_match, biasadd_name, new_nodes));
 
   NodeDef new_batch_to_space_node = batch_to_space_node;
   // reuse batch_norm node name
--- a/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
+++ b/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc
@@ -138,8 +138,8 @@ class FoldOldBatchNormsTest : public ::testing::Test {
     Output weights_op =
         Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
 
-    Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"),
-                                           input_op, weights_op, {1, 1, 1, 1}, "VALID");
+    Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op,
+                                           weights_op, {1, 1, 1, 1}, "VALID");
 
     Tensor mean_data(DT_FLOAT, TensorShape({4}));
     test::FillValues<float>(&mean_data, {10.0f, 20.0f, 30.0f, 40.0f});
@@ -164,7 +164,6 @@ class FoldOldBatchNormsTest : public ::testing::Test {
     GraphDef original_graph_def;
     TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
 
-
     NodeDef batch_norm_node;
     batch_norm_node.set_op("BatchNormWithGlobalNormalization");
     batch_norm_node.set_name("output");
@@ -294,8 +293,8 @@ class FoldOldBatchNormsTest : public ::testing::Test {
     Output weights_op =
         Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
 
-    Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"),
-                                           input_op, weights_op, {1, 1, 1, 1}, "VALID");
+    Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op,
+                                           weights_op, {1, 1, 1, 1}, "VALID");
 
     Tensor mean_data(DT_FLOAT, TensorShape({4}));
     test::FillValues<float>(&mean_data, {10.0f, 20.0f, 30.0f, 40.0f});
@@ -477,16 +476,17 @@ void TestFoldFusedBatchNormsWithBatchToSpace() {
 
   Tensor block_shape_data(DT_INT32, TensorShape({2}));
   test::FillValues<int32>(&block_shape_data, {1, 2});
-  Output block_shape_op =
-      Const(root.WithOpName("block_shape_op"), Input::Initializer(block_shape_data));
+  Output block_shape_op = Const(root.WithOpName("block_shape_op"),
+                                Input::Initializer(block_shape_data));
 
   Tensor crops_data(DT_INT32, TensorShape({2, 2}));
   test::FillValues<int32>(&crops_data, {0, 0, 0, 1});
   Output crops_op =
       Const(root.WithOpName("crops_op"), Input::Initializer(crops_data));
 
-  Output batch_to_space_op = BatchToSpaceND(root.WithOpName("batch_to_space_op"),
-                                            conv_op, block_shape_op, crops_data);
+  Output batch_to_space_op =
+      BatchToSpaceND(root.WithOpName("batch_to_space_op"), conv_op,
+                     block_shape_op, crops_data);
 
   Tensor mean_data(DT_FLOAT, TensorShape({2}));
   test::FillValues<float>(&mean_data, {10.0f, 20.0f});
||||||
@ -495,8 +495,8 @@ void TestFoldFusedBatchNormsWithBatchToSpace() {
|
|||||||
|
|
||||||
Tensor variance_data(DT_FLOAT, TensorShape({2}));
|
Tensor variance_data(DT_FLOAT, TensorShape({2}));
|
||||||
test::FillValues<float>(&variance_data, {0.25f, 0.5f});
|
test::FillValues<float>(&variance_data, {0.25f, 0.5f});
|
||||||
Output variance_op = Const(root.WithOpName("variance_op"),
|
Output variance_op =
|
||||||
Input::Initializer(variance_data));
|
Const(root.WithOpName("variance_op"), Input::Initializer(variance_data));
|
||||||
|
|
||||||
Tensor beta_data(DT_FLOAT, TensorShape({2}));
|
Tensor beta_data(DT_FLOAT, TensorShape({2}));
|
||||||
test::FillValues<float>(&beta_data, {0.1f, 0.6f});
|
test::FillValues<float>(&beta_data, {0.1f, 0.6f});
|
||||||
@@ -570,7 +570,8 @@ TEST_F(FoldOldBatchNormsTest, TestFoldOldBatchNormsAfterDepthwiseConv2dNative) {
   TestFoldOldBatchNormsAfterDepthwiseConv2dNative();
 }
 
-TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNormsAfterDepthwiseConv2dNative) {
+TEST_F(FoldOldBatchNormsTest,
+       TestFoldFusedBatchNormsAfterDepthwiseConv2dNative) {
   TestFoldFusedBatchNormsAfterDepthwiseConv2dNative();
 }
 