Merge pull request #45115 from Intel-tensorflow:dnn0x_clean_runtime

PiperOrigin-RevId: 350152613
Change-Id: Iacb99fd0b501c6eb714a30e4f309698fe4f47088

Commit: 4b945d21d9
Author: TensorFlower Gardener
Date: 2021-01-05 09:15:26 -08:00

3 changed files with 3 additions and 19 deletions

Changed file 1 of 3:

@@ -480,13 +480,11 @@ class MklLayoutRewritePass : public GraphOptimizationPass {
         {csinfo_.fused_batch_norm_grad_v3,
          mkl_op_registry::GetMklOpName(csinfo_.fused_batch_norm_grad_v3),
          CopyAttrsAll, FusedBatchNormV3Rewrite, GetRewriteCause()});
-#ifdef ENABLE_MKLDNN_V1
     rinfo_.push_back({csinfo_.fused_batch_norm_ex,
                       native_fmt ? csinfo_.mkl_native_fused_batch_norm_ex
                                  : csinfo_.mkl_fused_batch_norm_ex,
                       CopyAttrsAll, FusedBatchNormExRewrite,
                       GetRewriteCause()});
-#endif
     rinfo_.push_back({csinfo_.fused_conv2d,
                       native_fmt ? csinfo_.mkl_native_fused_conv2d
                                  : csinfo_.mkl_fused_conv2d,
@@ -672,14 +670,12 @@ class MklLayoutRewritePass : public GraphOptimizationPass {
     rinfo_.push_back({csinfo_.requantize,
                       mkl_op_registry::GetMklOpName(csinfo_.requantize),
                       CopyAttrsAll, AlwaysRewrite, GetRewriteCause()});
-#ifdef ENABLE_MKLDNN_V1
     // Optimized TanhGrad support exists only in DNNL 1.x.
     rinfo_.push_back({csinfo_.tanh, mkl_op_registry::GetMklOpName(csinfo_.tanh),
                       CopyAttrsAll, AlwaysRewrite, GetRewriteCause()});
     rinfo_.push_back({csinfo_.tanh_grad,
                       mkl_op_registry::GetMklOpName(csinfo_.tanh_grad),
                       CopyAttrsAll, AlwaysRewrite, GetRewriteCause()});
-#endif  // ENABLE_MKLDNN_V1
     rinfo_.push_back({csinfo_.reshape,
                       mkl_op_registry::GetMklOpName(csinfo_.reshape),
                       CopyAttrsAll, AlwaysRewrite, GetRewriteCause()});
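
For context on what these entries do: each rinfo_.push_back call above registers one rewrite rule, pairing a TensorFlow op name with the MKL op it can be rewritten to, an attribute-copying helper, and a predicate that decides whether a given node gets rewritten. Below is a minimal, self-contained sketch of that registry pattern; the struct and field names are illustrative approximations, not the exact definitions inside MklLayoutRewritePass.

// Illustrative sketch only -- field names approximate the pass's rewrite-info
// record, they are not copied from the TensorFlow source.
#include <functional>
#include <string>
#include <vector>

struct Node;         // stand-in for tensorflow::Node
struct NodeBuilder;  // stand-in for tensorflow::NodeBuilder

struct RewriteInfo {
  std::string name;      // op the pass looks for, e.g. "FusedBatchNormEx"
  std::string new_name;  // MKL op it is rewritten to, e.g. "_MklFusedBatchNormEx"
  std::function<void(const Node*, NodeBuilder*)> copy_attrs;  // copies node attrs
  std::function<bool(const Node*)> rewrite_rule;              // gate: rewrite or not
};

// After this change the rule is registered unconditionally, rather than only
// when ENABLE_MKLDNN_V1 was defined at compile time.
void RegisterRules(std::vector<RewriteInfo>* rinfo) {
  rinfo->push_back({"FusedBatchNormEx", "_MklFusedBatchNormEx",
                    /*copy_attrs=*/[](const Node*, NodeBuilder*) {},
                    /*rewrite_rule=*/[](const Node*) { return true; }});
}

With the ENABLE_MKLDNN_V1 guards gone, the FusedBatchNormEx and Tanh/TanhGrad rules above are always part of the registry.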

Changed file 2 of 3:

@@ -53,7 +53,6 @@ static void InitGraph(const string& s, Graph* graph,
   GraphDef graph_def;
   auto parser = protobuf::TextFormat::Parser();
-  // parser.AllowRelaxedWhitespace(true);
   CHECK(parser.MergeFromString(s, &graph_def)) << s;
   GraphConstructorOptions opts;
   TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, graph));
@@ -66,7 +65,6 @@
 class MklLayoutPassTest : public ::testing::Test {
  public:
   MklLayoutPassTest() : graph_(OpRegistry::Global()) {}
-  // Ashraf added
   Node* FindNode(const string& name) {
     for (Node* node : graph_.nodes()) {
       if (node->name() == name) return node;
@@ -3087,8 +3085,6 @@ REGISTER_TEST_ALL_TYPES(NodeRewrite_LeakyReluGrad_Negative);
 REGISTER_TEST_ALL_TYPES(NodeRewrite_LeakyReluLeakyReluGrad_Positive);
 #undef REGISTER_TEST
-#ifdef ENABLE_MKLDNN_V1
 #define REGISTER_TEST(NAME, T, INPUT)                                    \
   TEST_F(MklLayoutPassTest, NAME##_##T) {                                \
     DCHECK_EQ(kTensorOrdering, MklTfTensorOrdering::TENSORS_CONTIGUOUS); \
@@ -3146,7 +3142,6 @@ REGISTER_TEST_ALL_TYPES(NodeRewrite_TanhGrad_Positive);
   }
 REGISTER_TEST_ALL_TYPES(NodeRewrite_TanhTanhGrad_Positive);
 #undef REGISTER_TEST
-#endif  // ENABLE_MKLDNN_V1
 #define REGISTER_TEST(NAME, T, INPUT)                                    \
   TEST_F(MklLayoutPassTest, NAME##_##T) {                                \
@@ -3513,7 +3508,6 @@ REGISTER_TEST_ALL_TYPES(NodeRewrite_FusedBatchNormGradV3_5D_Negative_2);
 #undef DATA_FORMAT
 #undef REGISTER_TEST
-#ifdef ENABLE_MKLDNN_V1
 #define REGISTER_TEST(NAME, T, INPUT)                                    \
   TEST_F(MklLayoutPassTest, NAME##_##T) {                                \
     InitGraph("node { name: 'A' op: '" #INPUT "'}"                       \
@@ -3603,7 +3597,6 @@ REGISTER_TEST_ALL_TYPES(NodeRewrite_FusedBatchNormEx_Negative1);
   }
 REGISTER_TEST_ALL_TYPES(NodeRewrite_FusedBatchNormEx_Negative2);
 #undef REGISTER_TEST
-#endif  // ENABLE_MKLDNN_V1
 TEST_F(MklLayoutPassTest, NodeRewrite_QuantizedDepthwiseConv2D_Positive) {
   InitGraph(
@@ -5184,8 +5177,8 @@ static void BM_MklLayoutRewritePass(int iters, int op_nodes) {
   bool first = true;
   while (iters > 0) {
-    Graph* graph = new Graph(OpRegistry::Global());
-    InitGraph(s, graph);
+    std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
+    InitGraph(s, graph.get());
     int N = graph->num_node_ids();
     if (first) {
       testing::SetLabel(strings::StrCat("Per graph node. Nodes: ", N));
@@ -5193,13 +5186,12 @@ static void BM_MklLayoutRewritePass(int iters, int op_nodes) {
     }
     {
       testing::StartTiming();
-      std::unique_ptr<Graph> ug(graph);
+      std::unique_ptr<Graph> ug(graph.get());
       RunMklLayoutRewritePass(&ug);
       testing::StopTiming();
     }
     iters -= N;  // Our benchmark units are individual graph nodes,
                  // not whole graphs
-    // delete graph;
   }
 }
 BENCHMARK(BM_MklLayoutRewritePass)->Arg(1000)->Arg(10000);
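
The two hunks above change the benchmark so the per-iteration Graph is owned by a std::unique_ptr from the moment it is created, which is why the commented-out manual delete can go away. Here is a minimal sketch of that ownership pattern with placeholder names (Graph is a stand-in type and RunPass is not TensorFlow's RunMklLayoutRewritePass), using a std::move handoff to keep a single owner:

#include <memory>
#include <utility>

struct Graph {  // stand-in for tensorflow::Graph
  int num_node_ids() const { return 42; }
};

// Stand-in for the pass runner: it receives the graph through a
// std::unique_ptr* so the pass can rewrite or replace the graph it points to.
void RunPass(std::unique_ptr<Graph>* g) { /* rewrite **g in place */ }

void BenchmarkLoop(int iters) {
  while (iters > 0) {
    auto graph = std::make_unique<Graph>();       // RAII: no manual delete
    int n = graph->num_node_ids();
    std::unique_ptr<Graph> ug(std::move(graph));  // hand off: exactly one owner
    RunPass(&ug);
    iters -= n;  // benchmark units are graph nodes, not whole graphs
  }              // ug frees the (possibly rewritten) graph here
}

As a general note on this pattern, constructing the inner pointer from .get() instead of moving would leave two unique_ptrs owning the same Graph, so a move (or simply reusing the outer pointer) is the safer handoff.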

Changed file 3 of 3:

@@ -232,11 +232,7 @@ REGISTER_OP("_FusedBatchNormEx")
     .Output("reserve_space_1: U")
     .Output("reserve_space_2: U")
     .Output("reserve_space_3: U")
-#ifdef ENABLE_MKLDNN_V1
     .Attr("T: {half, float, bfloat16}")
-#else
-    .Attr("T: {half, float}")
-#endif
     .Attr("U: {float}")
     .Attr("epsilon: float = 0.0001")
     .Attr("exponential_avg_factor: float = 1.0")