From eb07cc9d8f370d1c99c317ade64f9dd210b61c61 Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Wed, 17 Jun 2020 23:20:11 +0000
Subject: [PATCH 01/19] Map dtypes to classes and add whitelist op set

---
 tensorflow/python/framework/python_op_gen.cc | 30 ++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index ca0c5d9ef1a..217033f1c31 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -45,6 +45,36 @@ const int kRightMargin = 78;
 
 constexpr char kEagerFallbackSuffix[] = "_eager_fallback";
 
+std::unordered_map<string, string> dtypes_map {
+  {"_dtypes.float16", "_dtypes.Float16"},
+  {"_dtypes.float32", "_dtypes.Float32"},
+  {"_dtypes.float64", "_dtypes.Float64"},
+  {"_dtypes.bfloat16", "_dtypes.BFloat16"},
+  {"_dtypes.complex64", "_dtypes.Complex64"},
+  {"_dtypes.complex128", "_dtypes.Complex128"},
+  {"_dtypes.int8", "_dtypes.Int8"},
+  {"_dtypes.uint8", "_dtypes.UInt8"},
+  {"_dtypes.uint16", "_dtypes.UInt16"},
+  {"_dtypes.uint32", "_dtypes.UInt32"},
+  {"_dtypes.uint64", "_dtypes.UInt64"},
+  {"_dtypes.int16", "_dtypes.Int16"},
+  {"_dtypes.int32", "_dtypes.Int32"},
+  {"_dtypes.int64", "_dtypes.Int64"},
+  {"_dtypes.bool", "_dtypes.Bool"},
+  {"_dtypes.string", "_dtypes.String"},
+  {"_dtypes.qint8", "_dtypes.QInt8"},
+  {"_dtypes.quint8", "_dtypes.QUInt8"},
+  {"_dtypes.qint16", "_dtypes.QInt16"},
+  {"_dtypes.quint16", "_dtypes.QUInt16"},
+  {"_dtypes.qint32", "_dtypes.QInt32"},
+  {"_dtypes.resource", "_dtypes.Resource"},
+  {"_dtypes.variant", "_dtypes.Variant"}
+};
+
+// Add op name to this set to add type annotations
+std::unordered_set<string> type_annotate_ops {
+};
+
 string AttrVarName(const string& attr_name,
                    std::unordered_map<string, string>* attr_expressions) {
   const string var = strings::StrCat("_attr_", attr_name);
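(A note for orientation, not part of the patch: the right-hand column of dtypes_map names the per-dtype classes that later patches in this series splice into generated parameter annotations of the shape

    x: _ops.Tensor[_dtypes.Float32]

where _ops and _dtypes are the module aliases the generated Python files already import. The exact annotation shape is pinned down by the tests added in PATCH 12 below.)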
From 06075a3baa5f57a8e7c84ac814d7350e4582e017 Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Wed, 17 Jun 2020 23:34:23 +0000
Subject: [PATCH 02/19] Add function to generate TypeVars for each op

---
 tensorflow/python/framework/python_op_gen.cc | 53 ++++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 217033f1c31..e8e670de57c 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -178,6 +178,8 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
 
   void AddRawOpExport(const string& parameters);
 
+  void GenerateTypeVars();
+
   void AddAttrForArg(const string& attr, int arg_index) {
     gtl::InsertIfNotPresent(&inferred_attrs_, attr,
                             op_def_.input_arg(arg_index).name());
@@ -397,6 +399,53 @@ string GenEagerPythonOp::Code() {
   return prelude_ + result_;
 }
 
+// Generate TypeVars using attrs
+void GenEagerPythonOp::GenerateTypeVars() {
+  bool added_typevar = false;
+  for (int i = 0; i < op_def_.attr_size(); ++i) {
+    const auto& attr = op_def_.attr(i);
+    if (attr.type() == "type") {
+      std::vector<string> allowed_types;
+      bool has_dtype_half = false;
+      for (int t : attr.allowed_values().list().type()) {
+        if (t == 19) { // DT_HALF = 19;
+          has_dtype_half = true;
+          break;
+        }
+        DataType dtype = static_cast<DataType>(t);
+        const string py_dtype = python_op_gen_internal::DataTypeToPython(dtype, "_dtypes.");
+        if (dtypes_map.find(py_dtype) != dtypes_map.end()) {
+          allowed_types.emplace_back(dtypes_map[py_dtype]);
+        }
+      }
+
+      // Do not create a type variable that includes the dtype half
+      if (has_dtype_half) continue;
+
+      // If all dtypes are allowed, add them all
+      if (allowed_types.empty()) {
+        for (std::pair<string, string> map_dtype : dtypes_map) {
+          allowed_types.emplace_back(map_dtype.second);
+        }
+      }
+
+      std::sort(allowed_types.begin(), allowed_types.end());
+
+      string typevar_dtypes;
+      for (std::vector<string>::iterator it = allowed_types.begin(); it != allowed_types.end(); ++it) {
+        if (!typevar_dtypes.empty()) strings::StrAppend(&typevar_dtypes, ", ");
+        strings::StrAppend(&typevar_dtypes, *it);
+      }
+
+      const string type_var_name = "TV_" + op_def_.name() + "_" + attr.name();
+      strings::StrAppend(&result_, type_var_name, " = TypeVar(\"", type_var_name, "\", ", typevar_dtypes,")\n");
+      added_typevar = true;
+    }
+  }
+
+  if(added_typevar) strings::StrAppend(&result_, "\n");
+}
+
 void GenEagerPythonOp::HandleGraphMode(
     const string& function_setup, const std::vector<string>& output_sizes) {
   strings::StrAppend(&result_, "  # Add nodes to the TensorFlow graph.\n");
@@ -720,6 +769,9 @@ void GenEagerPythonOp::AddEagerFunctionTeardown(
 bool GenEagerPythonOp::AddEagerFastPathAndGraphCode(
     const string& parameters, const std::vector<string>& output_sizes,
     const string& eager_not_allowed_error) {
+  if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
+    GenerateTypeVars();
+  }
   if (api_def_.visibility() == ApiDef::VISIBLE) {
     strings::StrAppend(&result_, "@_dispatch.add_dispatch_list\n");
  }
@@ -1047,6 +1099,7 @@ from tensorflow.python.util.deprecation import deprecated_endpoints
 from tensorflow.python.util import dispatch as _dispatch
 from tensorflow.python.util.tf_export import tf_export
 
+from typing import TypeVar
 )");
 
   for (const auto& op_def : ops.op()) {
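(Illustration, not part of the patch: for a hypothetical whitelisted op Foo whose "type" attr T is restricted to DT_UINT8 and DT_INT8 — the synthetic op the tests in PATCH 12 below exercise — GenerateTypeVars() emits, above the generated function,

    TV_Foo_T = TypeVar("TV_Foo_T", _dtypes.Int8, _dtypes.UInt8)

which is why the generated-file prelude now imports TypeVar from typing.)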
From c87ccd156017b2cad24740b375a78f322f3951ef Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Wed, 17 Jun 2020 23:51:42 +0000
Subject: [PATCH 03/19] Add type annotations to op parameters

---
 tensorflow/python/framework/python_op_gen.cc | 69 +++++++++++++++++++-
 1 file changed, 66 insertions(+), 3 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index e8e670de57c..777603c502d 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -179,6 +179,7 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
   void AddRawOpExport(const string& parameters);
 
   void GenerateTypeVars();
+  string GetTypeAnnotatedParams();
 
   void AddAttrForArg(const string& attr, int arg_index) {
     gtl::InsertIfNotPresent(&inferred_attrs_, attr,
@@ -343,10 +344,15 @@ string GenEagerPythonOp::Code() {
   }
 
   string parameters;
-  for (const auto& param : params_no_default_) {
-    if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
-    strings::StrAppend(&parameters, param.GetRenameTo());
+  if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
+    strings::StrAppend(&parameters, GetTypeAnnotatedParams());
+  } else {
+    for (const auto& param : params_no_default_) {
+      if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
+      strings::StrAppend(&parameters, param.GetRenameTo());
+    }
   }
+
   string parameters_with_defaults = parameters;
   for (const auto& param_and_default : params_with_default_) {
     if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
@@ -399,6 +405,63 @@ string GenEagerPythonOp::Code() {
   return prelude_ + result_;
 }
 
+string GenEagerPythonOp::GetTypeAnnotatedParams() {
+  // holds mappings from param name to its type annotation
+  std::unordered_map<string, string> param_type_map;
+  for (int i = 0; i
Date: Wed, 17 Jun 2020 23:59:45 +0000
Subject: [PATCH 04/19] Generate type annotations for op return values

---
 tensorflow/python/framework/python_op_gen.cc | 54 ++++++++++++++++++--
 1 file changed, 51 insertions(+), 3 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 777603c502d..8c4d0f5b753 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -180,7 +180,7 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
 
   void GenerateTypeVars();
   string GetTypeAnnotatedParams();
-
+  void AddReturnTypeAnnotation();
   void AddAttrForArg(const string& attr, int arg_index) {
     gtl::InsertIfNotPresent(&inferred_attrs_, attr,
                             op_def_.input_arg(arg_index).name());
@@ -413,7 +413,7 @@ string GenEagerPythonOp::GetTypeAnnotatedParams() {
     if (attr.type() == "type") {
       bool has_dtype_half = false;
       for (int t : attr.allowed_values().list().type()) {
-        if (t == 19) { // DT_HALF = 19;
+        if (t == 19) { // DT_HALF = 19
           has_dtype_half = true;
           break;
         }
@@ -471,7 +471,7 @@ void GenEagerPythonOp::GenerateTypeVars() {
       std::vector<string> allowed_types;
       bool has_dtype_half = false;
       for (int t : attr.allowed_values().list().type()) {
-        if (t == 19) { // DT_HALF = 19;
+        if (t == 19) { // DT_HALF = 19
          has_dtype_half = true;
          break;
        }
@@ -509,6 +509,51 @@ void GenEagerPythonOp::GenerateTypeVars() {
   if(added_typevar) strings::StrAppend(&result_, "\n");
 }
 
+void GenEagerPythonOp::AddReturnTypeAnnotation() {
+  string return_type = "";
+  if (op_def_.output_arg_size() == 1) {
+    const auto& arg = op_def_.output_arg(0);
+    // If the "type" field is set, the return Tensor has a single DataType
+    if (arg.type() != 0) {
+      const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
+      if (dtypes_map.find(py_dtype) != dtypes_map.end()) {
+        strings::StrAppend(&return_type, "_ops.Tensor[", dtypes_map[py_dtype], "]");
+      }
+    }
+    else {
+      for (int i = 0; i < op_def_.attr_size(); ++i) {
+        const auto& attr = op_def_.attr(i);
+        if (attr.name() == arg.type_attr()) {
+          std::vector<string> allowed_types;
+          for (int t : attr.allowed_values().list().type()) {
+            // Do not add type annotations when return type can be half
+            if (t == 19) return; // DT_HALF = 19
+            DataType dtype = static_cast<DataType>(t);
+            const string py_dtype = python_op_gen_internal::DataTypeToPython(dtype, "_dtypes.");
+            allowed_types.emplace_back(py_dtype);
+          }
+
+          std::sort(allowed_types.begin(), allowed_types.end());
+
+          string typevar_dtypes;
+          for (std::vector<string>::iterator it = allowed_types.begin(); it != allowed_types.end(); ++it) {
+            if (!typevar_dtypes.empty()) strings::StrAppend(&typevar_dtypes, ", ");
+            strings::StrAppend(&typevar_dtypes, *it);
+          }
+
+          const string type_var_name = "TV_" + op_def_.name() + "_" + attr.name();
+          strings::StrAppend(&return_type, "_ops.Tensor[", type_var_name, "]");
+        }
+      }
+    }
+
+    if (!return_type.empty()) {
+      result_.erase(result_.length() - 2);
+      strings::StrAppend(&result_, " -> ", return_type, ":\n");
+    }
+  }
+}
+
 void GenEagerPythonOp::HandleGraphMode(
     const string& function_setup, const std::vector<string>& output_sizes) {
   strings::StrAppend(&result_, "  # Add nodes to the TensorFlow graph.\n");
@@ -841,6 +886,9 @@ bool GenEagerPythonOp::AddEagerFastPathAndGraphCode(
 
   AddExport();
   AddDefLine(function_name_, parameters);
+  if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
+    AddReturnTypeAnnotation();
+  }
   AddDocStringDescription();
   AddDocStringArgs();
   AddDocStringInputs();
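(Illustration, not part of the patch: with parameter and return annotations combined, the wrapper generated for the synthetic op Foo — whose inputs x and y are typed by attrs T and T2 and whose output is typed by T — takes the shape the tests in PATCH 12 eventually pin down:

    def foo(x: _ops.Tensor[TV_Foo_T], y: _ops.Tensor[TV_Foo_T2], name=None) -> _ops.Tensor[TV_Foo_T]:

The parameter annotations come from GetTypeAnnotatedParams(), the return annotation from AddReturnTypeAnnotation().)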
From 40ef6a7ad67a973f971eae13b59b5f25777d037e Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Thu, 18 Jun 2020 22:43:22 +0000
Subject: [PATCH 05/19] Map args & attrs to types and use map to annotate

---
 tensorflow/python/framework/python_op_gen.cc | 149 ++++++++-----------
 1 file changed, 59 insertions(+), 90 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 8c4d0f5b753..79c8800418c 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -47,6 +47,7 @@ constexpr char kEagerFallbackSuffix[] = "_eager_fallback";
 
 std::unordered_map<string, string> dtypes_map {
   {"_dtypes.float16", "_dtypes.Float16"},
+  {"_dtypes.half", "_dtypes.Half"},
   {"_dtypes.float32", "_dtypes.Float32"},
   {"_dtypes.float64", "_dtypes.Float64"},
   {"_dtypes.bfloat16", "_dtypes.BFloat16"},
@@ -162,7 +163,8 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
 
   bool AddEagerFastPathAndGraphCode(const string& parameters,
                                     const std::vector<string>& output_sizes,
-                                    const string& eager_not_allowed_error);
+                                    const string& eager_not_allowed_error,
+                                    std::unordered_map<string, string>& type_map);
   bool AddEagerFallbackCode(const string& parameters,
                             const std::vector<string>& output_sizes,
                             const string& num_outputs_expr,
@@ -179,8 +181,11 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
   void AddRawOpExport(const string& parameters);
 
   void GenerateTypeVars();
-  string GetTypeAnnotatedParams();
-  void AddReturnTypeAnnotation();
+
+  std::unordered_map<string, string> GetTypeAnnotationMap();
+
+  void AddReturnTypeAnnotation(std::unordered_map<string, string>& type_map);
+
   void AddAttrForArg(const string& attr, int arg_index) {
     gtl::InsertIfNotPresent(&inferred_attrs_, attr,
                             op_def_.input_arg(arg_index).name());
@@ -343,13 +348,22 @@ string GenEagerPythonOp::Code() {
     param_names_.push_back(param_and_default.first);
   }
 
-  string parameters;
+  std::unordered_map<string, string> type_map;
+  // Only populate map for whitelisted ops
   if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
-    strings::StrAppend(&parameters, GetTypeAnnotatedParams());
-  } else {
-    for (const auto& param : params_no_default_) {
-      if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
-      strings::StrAppend(&parameters, param.GetRenameTo());
+    type_map = GetTypeAnnotationMap();
+  }
+
+  string parameters;
+  for (const auto& param : params_no_default_) {
+    if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
+    strings::StrAppend(&parameters, param.GetRenameTo());
+
+    // Add type annotations to param
+    if (type_map.find(param.GetName()) != type_map.end()) {
+      if(!type_map[param.GetName()].empty()) {
+        strings::StrAppend(&parameters, ": ", type_map[param.GetName()]);
+      }
     }
   }
 
@@ -393,7 +407,7 @@ string GenEagerPythonOp::Code() {
   string eager_not_allowed_error = GetEagerNotAllowedError();
 
   if (!AddEagerFastPathAndGraphCode(parameters_with_defaults, output_sizes,
-                                    eager_not_allowed_error)) {
+                                    eager_not_allowed_error, type_map)) {
     return result_;
   }
 
@@ -405,61 +419,56 @@ string GenEagerPythonOp::Code() {
   return prelude_ + result_;
 }
 
-string GenEagerPythonOp::GetTypeAnnotatedParams() {
-  // holds mappings from param name to its type annotation
-  std::unordered_map<string, string> param_type_map;
+std::unordered_map<string, string> GenEagerPythonOp::GetTypeAnnotationMap() {
+  std::unordered_map<string, string> type_map;
+  // Mapping attrs to TypeVars
+  for (int i = 0; i
       std::vector<string> allowed_types;
-      bool has_dtype_half = false;
       for (int t : attr.allowed_values().list().type()) {
         DataType dtype = static_cast<DataType>(t);
         const string py_dtype = python_op_gen_internal::DataTypeToPython(dtype, "_dtypes.");
         if (dtypes_map.find(py_dtype) != dtypes_map.end()) {
           allowed_types.emplace_back(dtypes_map[py_dtype]);
         }
       }
 
-      // Do not create a type variable that includes the dtype half
-      if (has_dtype_half) continue;
-
       // If all dtypes are allowed, add them all
       if (allowed_types.empty()) {
         for (std::pair<string, string> map_dtype : dtypes_map) {
           allowed_types.emplace_back(map_dtype.second);
         }
@@ -509,48 +510,16 @@ void GenEagerPythonOp::GenerateTypeVars() {
   if(added_typevar) strings::StrAppend(&result_, "\n");
 }
 
-void GenEagerPythonOp::AddReturnTypeAnnotation() {
-  string return_type = "";
+void GenEagerPythonOp::AddReturnTypeAnnotation(std::unordered_map<string, string>& type_map) {
   if (op_def_.output_arg_size() == 1) {
     const auto& arg = op_def_.output_arg(0);
-    // If the "type" field is set, the return Tensor has a single DataType
-    if (arg.type() != 0) {
-      const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
-      if (dtypes_map.find(py_dtype) != dtypes_map.end()) {
-        strings::StrAppend(&return_type, "_ops.Tensor[", dtypes_map[py_dtype], "]");
+    // Add type annotations to param
+    if (type_map.find(arg.name()) != type_map.end()) {
+      if (!type_map[arg.name()].empty()) {
+        result_.erase(result_.length() - 2);
+        strings::StrAppend(&result_, " -> ", type_map[arg.name()], ":\n");
       }
     }
-    else {
-      for (int i = 0; i < op_def_.attr_size(); ++i) {
-        const auto& attr = op_def_.attr(i);
-        if (attr.name() == arg.type_attr()) {
-          std::vector<string> allowed_types;
-          for (int t : attr.allowed_values().list().type()) {
-            // Do not add type annotations when return type can be half
-            if (t == 19) return; // DT_HALF = 19
-            DataType dtype = static_cast<DataType>(t);
-            const string py_dtype = python_op_gen_internal::DataTypeToPython(dtype, "_dtypes.");
-            allowed_types.emplace_back(py_dtype);
-          }
-
-          std::sort(allowed_types.begin(), allowed_types.end());
-
-          string typevar_dtypes;
-          for (std::vector<string>::iterator it = allowed_types.begin(); it != allowed_types.end(); ++it) {
-            if (!typevar_dtypes.empty()) strings::StrAppend(&typevar_dtypes, ", ");
-            strings::StrAppend(&typevar_dtypes, *it);
-          }
-
-          const string type_var_name = "TV_" + op_def_.name() + "_" + attr.name();
-          strings::StrAppend(&return_type, "_ops.Tensor[", type_var_name, "]");
-        }
-      }
-    }
-
-    if (!return_type.empty()) {
-      result_.erase(result_.length() - 2);
-      strings::StrAppend(&result_, " -> ", return_type, ":\n");
-    }
-  }
-}
+  }
+}
 
 void GenEagerPythonOp::HandleGraphMode(
     const string& function_setup, const std::vector<string>& output_sizes) {
   strings::StrAppend(&result_, "  # Add nodes to the TensorFlow graph.\n");
@@ -876,7 +845,7 @@ void GenEagerPythonOp::AddEagerFunctionTeardown(
 bool GenEagerPythonOp::AddEagerFastPathAndGraphCode(
     const string& parameters, const std::vector<string>& output_sizes,
-    const string& eager_not_allowed_error) {
+    const string& eager_not_allowed_error, std::unordered_map<string, string>& type_map) {
   if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
     GenerateTypeVars();
   }
@@ -887,7 +856,7 @@ bool GenEagerPythonOp::AddEagerFastPathAndGraphCode(
   AddExport();
   AddDefLine(function_name_, parameters);
   if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
-    AddReturnTypeAnnotation();
+    AddReturnTypeAnnotation(type_map);
   }
   AddDocStringDescription();
   AddDocStringArgs();
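(Sketch, not part of the patch: GetTypeAnnotationMap() centralizes the lookups that the earlier per-parameter logic duplicated. For the synthetic op Foo it would produce, expressed as a Python dict for readability, roughly

    {"T": "TV_Foo_T", "T2": "TV_Foo_T2",
     "x": "_ops.Tensor[TV_Foo_T]", "y": "_ops.Tensor[TV_Foo_T2]",
     "output": "_ops.Tensor[TV_Foo_T]"}

i.e. attrs map to TypeVar names and args map to Tensor[...] annotations. The exact contents are inferred from the surrounding code, not shown in the patch.)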
From 50763f6e3db1685193c53f0e19405f87f1bcc3b4 Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Fri, 19 Jun 2020 01:21:19 +0000
Subject: [PATCH 06/19] Extend typing to variable types, Annotate params with
 defaults

---
 tensorflow/python/framework/python_op_gen.cc | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 79c8800418c..1ef3f9c342b 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -361,7 +361,7 @@ string GenEagerPythonOp::Code() {
 
     // Add type annotations to param
     if (type_map.find(param.GetName()) != type_map.end()) {
-      if(!type_map[param.GetName()].empty()) {
+      if (!type_map[param.GetName()].empty()) {
         strings::StrAppend(&parameters, ": ", type_map[param.GetName()]);
       }
     }
@@ -372,6 +372,19 @@ string GenEagerPythonOp::Code() {
     if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
     if (!parameters_with_defaults.empty())
       strings::StrAppend(&parameters_with_defaults, ", ");
+
+    // Add type annotations to param_and_default
+    if (type_map.find(param_and_default.first.GetName()) != type_map.end()) {
+      if (!type_map[param_and_default.first.GetName()].empty()) {
+        strings::StrAppend(&parameters, ": ", type_map[param_and_default.first.GetName()]);
+        strings::StrAppend(&parameters_with_defaults,
+                           param_and_default.first.GetRenameTo(), ": ",
+                           type_map[param_and_default.first.GetName()], " ",
+                           "= ", param_and_default.second);
+        continue;
+      }
+    }
+
     strings::StrAppend(&parameters, param_and_default.first.GetRenameTo());
     strings::StrAppend(&parameters_with_defaults,
                        param_and_default.first.GetRenameTo(), "=",
@@ -427,6 +440,9 @@ std::unordered_map<string, string> GenEagerPythonOp::GetTypeAnnotationMap() {
     if (attr.type() == "type") {
       const string type_var_name = "TV_" + op_def_.name() + "_" + attr.name();
       type_map[attr.name()] = type_var_name;
+    } else if (attr.type() == "bool" || attr.type() == "float" ||
+               attr.type() == "int" || attr.type() == "bytes") {
+      type_map[attr.name()] = attr.type();
     }
   }
 
@@ -507,7 +523,7 @@ void GenEagerPythonOp::GenerateTypeVars() {
     }
   }
 
-  if(added_typevar) strings::StrAppend(&result_, "\n");
+  if (added_typevar) strings::StrAppend(&result_, "\n");
 }
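(Illustration, not part of the patch: defaulted attrs now carry annotations too. Per the expectations later added in PATCH 12, an op FooBar with a bool attr var1 defaulting to false and an int attr var2 defaulting to 0 generates

    def foo_bar(x: _ops.Tensor[_dtypes.Float32], t: TV_FooBar_t, var1: bool = False, var2: int = 0, name=None)

so Python-level defaults keep their rendered values while gaining types.)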
strings::StrAppend(¶meters, ": ", type_map[param.GetName()]); } } @@ -372,6 +372,19 @@ string GenEagerPythonOp::Code() { if (!parameters.empty()) strings::StrAppend(¶meters, ", "); if (!parameters_with_defaults.empty()) strings::StrAppend(¶meters_with_defaults, ", "); + + // Add type annotations to param_and_default + if (type_map.find(param_and_default.first.GetName()) != type_map.end()) { + if (!type_map[param_and_default.first.GetName()].empty()) { + strings::StrAppend(¶meters, ": ", type_map[param_and_default.first.GetName()]); + strings::StrAppend(¶meters_with_defaults, + param_and_default.first.GetRenameTo(), ": ", + type_map[param_and_default.first.GetName()], " ", + "= ", param_and_default.second); + continue; + } + } + strings::StrAppend(¶meters, param_and_default.first.GetRenameTo()); strings::StrAppend(¶meters_with_defaults, param_and_default.first.GetRenameTo(), "=", @@ -427,6 +440,9 @@ std::unordered_map GenEagerPythonOp::GetTypeAnnotationMap() { if (attr.type() == "type") { const string type_var_name = "TV_" + op_def_.name() + "_" + attr.name(); type_map[attr.name()] = type_var_name; + } else if (attr.type() == "bool" || attr.type() == "float" || + attr.type() == "int" || attr.type() == "bytes") { + type_map[attr.name()] = attr.type(); } } @@ -507,7 +523,7 @@ void GenEagerPythonOp::GenerateTypeVars() { } } - if(added_typevar) strings::StrAppend(&result_, "\n"); + if (added_typevar) strings::StrAppend(&result_, "\n"); } void GenEagerPythonOp::AddReturnTypeAnnotation(std::unordered_map& type_map) { From 8e70fe45468fd1c0b845814907bdf7d930e30f46 Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Fri, 19 Jun 2020 02:09:23 +0000 Subject: [PATCH 07/19] Generate TypeVar name once, Change input & attr enumeration --- tensorflow/python/framework/python_op_gen.cc | 21 +++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc index 1ef3f9c342b..4ecfccb611f 100644 --- a/tensorflow/python/framework/python_op_gen.cc +++ b/tensorflow/python/framework/python_op_gen.cc @@ -180,10 +180,10 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp { void AddRawOpExport(const string& parameters); - void GenerateTypeVars(); - std::unordered_map GetTypeAnnotationMap(); + void GenerateTypeVars(std::unordered_map& type_map); + void AddReturnTypeAnnotation(std::unordered_map& type_map); void AddAttrForArg(const string& attr, int arg_index) { @@ -435,8 +435,7 @@ string GenEagerPythonOp::Code() { std::unordered_map GenEagerPythonOp::GetTypeAnnotationMap() { std::unordered_map type_map; // Mapping attrs to TypeVars - for (int i = 0; i GenEagerPythonOp::GetTypeAnnotationMap() { } // Mapping input Tensors to their types - for (int i = 0; i < op_def_.input_arg_size(); ++i) { - const auto& arg = op_def_.input_arg(i); + for (const auto& arg : op_def_.input_arg()) { // Do not add type annotations to args that accept a sequence of tensors if (!arg.number_attr().empty()) continue; string type_annotation; @@ -466,7 +464,7 @@ std::unordered_map GenEagerPythonOp::GetTypeAnnotationMap() { type_map[arg.name()] = type_annotation; } - // Mapping output Tensor to its types + // Mapping output Tensor to its type if (op_def_.output_arg_size() == 1) { const auto& arg = op_def_.output_arg(0); string type_annotation; @@ -488,10 +486,9 @@ std::unordered_map GenEagerPythonOp::GetTypeAnnotationMap() { } // Generate TypeVars using attrs -void GenEagerPythonOp::GenerateTypeVars() { +void 
From 2399b25e139c729c0f0efd0efe5a009af04ef773 Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Fri, 19 Jun 2020 02:49:05 +0000
Subject: [PATCH 08/19] Change variable names, Remove comments

---
 tensorflow/python/framework/python_op_gen.cc | 84 ++++++++++----------
 1 file changed, 41 insertions(+), 43 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 4ecfccb611f..062a9aa01e4 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -45,7 +45,7 @@ const int kRightMargin = 78;
 
 constexpr char kEagerFallbackSuffix[] = "_eager_fallback";
 
-std::unordered_map<string, string> dtypes_map {
+std::unordered_map<string, string> dtype_type {
   {"_dtypes.float16", "_dtypes.Float16"},
   {"_dtypes.half", "_dtypes.Half"},
   {"_dtypes.float32", "_dtypes.Float32"},
@@ -164,7 +164,7 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
   bool AddEagerFastPathAndGraphCode(const string& parameters,
                                     const std::vector<string>& output_sizes,
                                     const string& eager_not_allowed_error,
-                                    std::unordered_map<string, string>& type_map);
+                                    std::unordered_map<string, string>& type_annotations);
   bool AddEagerFallbackCode(const string& parameters,
                             const std::vector<string>& output_sizes,
                             const string& num_outputs_expr,
@@ -182,9 +182,9 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
 
   std::unordered_map<string, string> GetTypeAnnotationMap();
 
-  void GenerateTypeVars(std::unordered_map<string, string>& type_map);
+  void GenerateTypeVars(std::unordered_map<string, string>& type_annotations);
 
-  void AddReturnTypeAnnotation(std::unordered_map<string, string>& type_map);
+  void AddReturnTypeAnnotation(std::unordered_map<string, string>& type_annotations);
 
   void AddAttrForArg(const string& attr, int arg_index) {
     gtl::InsertIfNotPresent(&inferred_attrs_, attr,
@@ -348,10 +348,10 @@ string GenEagerPythonOp::Code() {
     param_names_.push_back(param_and_default.first);
   }
 
-  std::unordered_map<string, string> type_map;
+  std::unordered_map<string, string> type_annotations;
   // Only populate map for whitelisted ops
   if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
-    type_map = GetTypeAnnotationMap();
+    type_annotations = GetTypeAnnotationMap();
   }
 
   string parameters;
@@ -360,9 +360,9 @@ string GenEagerPythonOp::Code() {
    strings::StrAppend(&parameters, param.GetRenameTo());
 
     // Add type annotations to param
-    if (type_map.find(param.GetName()) != type_map.end()) {
-      if (!type_map[param.GetName()].empty()) {
-        strings::StrAppend(&parameters, ": ", type_map[param.GetName()]);
+    if (type_annotations.find(param.GetName()) != type_annotations.end()) {
+      if (!type_annotations[param.GetName()].empty()) {
+        strings::StrAppend(&parameters, ": ", type_annotations[param.GetName()]);
       }
     }
   }
@@ -374,12 +374,12 @@ string GenEagerPythonOp::Code() {
       strings::StrAppend(&parameters_with_defaults, ", ");
 
     // Add type annotations to param_and_default
-    if (type_map.find(param_and_default.first.GetName()) != type_map.end()) {
-      if (!type_map[param_and_default.first.GetName()].empty()) {
-        strings::StrAppend(&parameters, ": ", type_map[param_and_default.first.GetName()]);
+    if (type_annotations.find(param_and_default.first.GetName()) != type_annotations.end()) {
+      if (!type_annotations[param_and_default.first.GetName()].empty()) {
+        strings::StrAppend(&parameters, ": ", type_annotations[param_and_default.first.GetName()]);
         strings::StrAppend(&parameters_with_defaults,
                            param_and_default.first.GetRenameTo(), ": ",
-                           type_map[param_and_default.first.GetName()], " ",
+                           type_annotations[param_and_default.first.GetName()], " ",
                            "= ", param_and_default.second);
         continue;
       }
    }
@@ -420,7 +420,7 @@ string GenEagerPythonOp::Code() {
   string eager_not_allowed_error = GetEagerNotAllowedError();
 
   if (!AddEagerFastPathAndGraphCode(parameters_with_defaults, output_sizes,
-                                    eager_not_allowed_error, type_map)) {
+                                    eager_not_allowed_error, type_annotations)) {
     return result_;
   }
 
@@ -433,60 +433,58 @@ string GenEagerPythonOp::Code() {
 }
 
 std::unordered_map<string, string> GenEagerPythonOp::GetTypeAnnotationMap() {
-  std::unordered_map<string, string> type_map;
+  std::unordered_map<string, string> type_annotations;
   // Mapping attrs to TypeVars
   for (const auto& attr : op_def_.attr()) {
     if (attr.type() == "type") {
       const string type_var_name = "TV_" + op_def_.name() + "_" + attr.name();
-      type_map[attr.name()] = type_var_name;
+      type_annotations[attr.name()] = type_var_name;
     } else if (attr.type() == "bool" || attr.type() == "float" ||
                attr.type() == "int" || attr.type() == "bytes") {
-      type_map[attr.name()] = attr.type();
+      type_annotations[attr.name()] = attr.type();
    }
   }
 
   // Mapping input Tensors to their types
   for (const auto& arg : op_def_.input_arg()) {
-    // Do not add type annotations to args that accept a sequence of tensors
+    // Do not add type annotations to args that accept a sequence of Tensors
     if (!arg.number_attr().empty()) continue;
     string type_annotation;
-    if (type_map.find(arg.type_attr()) != type_map.end()) {
+    if (type_annotations.find(arg.type_attr()) != type_annotations.end()) {
       // Get the correct TypeVar if input maps to an attr
-      strings::StrAppend(&type_annotation, "_ops.Tensor[", type_map[arg.type_attr()], "]");
+      strings::StrAppend(&type_annotation, "_ops.Tensor[", type_annotations[arg.type_attr()], "]");
     } else {
       // Get the dtype of the Tensor
       const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
-      if (dtypes_map.find(py_dtype) != dtypes_map.end()) {
-        strings::StrAppend(&type_annotation, "_ops.Tensor[", dtypes_map[py_dtype], "]");
+      if (dtype_type.find(py_dtype) != dtype_type.end()) {
+        strings::StrAppend(&type_annotation, "_ops.Tensor[", dtype_type[py_dtype], "]");
      }
    }
 
-    type_map[arg.name()] = type_annotation;
+    type_annotations[arg.name()] = type_annotation;
   }
 
   // Mapping output Tensor to its type
   if (op_def_.output_arg_size() == 1) {
     const auto& arg = op_def_.output_arg(0);
     string type_annotation;
-    if (type_map.find(arg.type_attr()) != type_map.end()) {
-      // Get the correct TypeVar if input maps to an attr
-      strings::StrAppend(&type_annotation, "_ops.Tensor[", type_map[arg.type_attr()], "]");
+    if (type_annotations.find(arg.type_attr()) != type_annotations.end()) {
+      strings::StrAppend(&type_annotation, "_ops.Tensor[", type_annotations[arg.type_attr()], "]");
     } else {
-      // Get the dtype of the Tensor
       const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
-      if (dtypes_map.find(py_dtype) != dtypes_map.end()) {
-        strings::StrAppend(&type_annotation, "_ops.Tensor[", dtypes_map[py_dtype], "]");
+      if (dtype_type.find(py_dtype) != dtype_type.end()) {
+        strings::StrAppend(&type_annotation, "_ops.Tensor[", dtype_type[py_dtype], "]");
      }
    }
 
-    type_map[arg.name()] = type_annotation;
+    type_annotations[arg.name()] = type_annotation;
   }
 
-  return type_map;
+  return type_annotations;
 }
 
 // Generate TypeVars using attrs
-void GenEagerPythonOp::GenerateTypeVars(std::unordered_map<string, string>& type_map) {
+void GenEagerPythonOp::GenerateTypeVars(std::unordered_map<string, string>& type_annotations) {
   bool added_typevar = false;
   for (const auto& attr : op_def_.attr()) {
     if (attr.type() == "type") {
@@ -494,14 +492,14 @@ void GenEagerPythonOp::GenerateTypeVars(std::unordered_map<string, string>& type_annotations) {
       for (int t : attr.allowed_values().list().type()) {
         DataType dtype = static_cast<DataType>(t);
         const string py_dtype = python_op_gen_internal::DataTypeToPython(dtype, "_dtypes.");
-        if (dtypes_map.find(py_dtype) != dtypes_map.end()) {
-          allowed_types.emplace_back(dtypes_map[py_dtype]);
+        if (dtype_type.find(py_dtype) != dtype_type.end()) {
+          allowed_types.emplace_back(dtype_type[py_dtype]);
        }
      }
 
       // If all dtypes are allowed, add them all
       if (allowed_types.empty()) {
-        for (std::pair<string, string> map_dtype : dtypes_map) {
+        for (std::pair<string, string> map_dtype : dtype_type) {
           allowed_types.emplace_back(map_dtype.second);
        }
      }
@@ -514,7 +512,7 @@ void GenEagerPythonOp::GenerateTypeVars(std::unordered_map<string, string>& type_annotations) {
         strings::StrAppend(&typevar_dtypes, *it);
      }
 
-      const string type_var_name = type_map[attr.name()];
+      const string type_var_name = type_annotations[attr.name()];
       strings::StrAppend(&result_, type_var_name, " = TypeVar(\"", type_var_name, "\", ", typevar_dtypes,")\n");
       added_typevar = true;
    }
@@ -523,14 +521,14 @@ void GenEagerPythonOp::GenerateTypeVars(std::unordered_map<string, string>& type_annotations) {
   if (added_typevar) strings::StrAppend(&result_, "\n");
 }
 
-void GenEagerPythonOp::AddReturnTypeAnnotation(std::unordered_map<string, string>& type_map) {
+void GenEagerPythonOp::AddReturnTypeAnnotation(std::unordered_map<string, string>& type_annotations) {
   if (op_def_.output_arg_size() == 1) {
     const auto& arg = op_def_.output_arg(0);
     // Add type annotations to param
-    if (type_map.find(arg.name()) != type_map.end()) {
-      if (!type_map[arg.name()].empty()) {
+    if (type_annotations.find(arg.name()) != type_annotations.end()) {
+      if (!type_annotations[arg.name()].empty()) {
         result_.erase(result_.length() - 2);
-        strings::StrAppend(&result_, " -> ", type_map[arg.name()], ":\n");
+        strings::StrAppend(&result_, " -> ", type_annotations[arg.name()], ":\n");
      }
    }
   }
 }
@@ -856,9 +854,9 @@ void GenEagerPythonOp::AddEagerFunctionTeardown(
 bool GenEagerPythonOp::AddEagerFastPathAndGraphCode(
     const string& parameters, const std::vector<string>& output_sizes,
-    const string& eager_not_allowed_error, std::unordered_map<string, string>& type_map) {
+    const string& eager_not_allowed_error, std::unordered_map<string, string>& type_annotations) {
   if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
-    GenerateTypeVars(type_map);
+    GenerateTypeVars(type_annotations);
   }
   if (api_def_.visibility() == ApiDef::VISIBLE) {
     strings::StrAppend(&result_, "@_dispatch.add_dispatch_list\n");
   }
 
   AddExport();
   AddDefLine(function_name_, parameters);
   if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
-    AddReturnTypeAnnotation(type_map);
+    AddReturnTypeAnnotation(type_annotations);
   }
   AddDocStringDescription();
   AddDocStringArgs();

From e51d4027b323bef5d6042ee0b18d617193211ac8 Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Fri, 19 Jun 2020 21:49:53 +0000
Subject: [PATCH 09/19] Add annotations to fallback ops

---
 tensorflow/python/framework/python_op_gen.cc | 56 ++++++++++----------
 1 file changed, 27 insertions(+), 29 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 062a9aa01e4..cdef72a155d 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -168,7 +168,8 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
   bool AddEagerFallbackCode(const string& parameters,
                             const std::vector<string>& output_sizes,
                             const string& num_outputs_expr,
-                            const string& eager_not_allowed_error);
+                            const string& eager_not_allowed_error,
+                            std::unordered_map<string, string>& type_annotations);
   void AddEagerFastPathExecute();
 
   void AddEagerInferredAttrs(const string& indentation);
@@ -355,18 +356,19 @@ string GenEagerPythonOp::Code() {
   }
 
   string parameters;
+  // Param can be an input or an attr
   for (const auto& param : params_no_default_) {
     if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
     strings::StrAppend(&parameters, param.GetRenameTo());
 
     // Add type annotations to param
     if (type_annotations.find(param.GetName()) != type_annotations.end()) {
-      if (!type_annotations[param.GetName()].empty()) {
-        strings::StrAppend(&parameters, ": ", type_annotations[param.GetName()]);
-      }
+      strings::StrAppend(&parameters, ": ", type_annotations[param.GetName()]);
    }
   }
 
+  // Append to parameters and parameters_with_defaults because multiple functions
+  // are generated (op and fallback op)
   string parameters_with_defaults = parameters;
   for (const auto& param_and_default : params_with_default_) {
     if (!parameters.empty()) strings::StrAppend(&parameters, ", ");
@@ -375,14 +377,12 @@ string GenEagerPythonOp::Code() {
 
     // Add type annotations to param_and_default
     if (type_annotations.find(param_and_default.first.GetName()) != type_annotations.end()) {
-      if (!type_annotations[param_and_default.first.GetName()].empty()) {
-        strings::StrAppend(&parameters, ": ", type_annotations[param_and_default.first.GetName()]);
-        strings::StrAppend(&parameters_with_defaults,
-                           param_and_default.first.GetRenameTo(), ": ",
-                           type_annotations[param_and_default.first.GetName()], " ",
-                           "= ", param_and_default.second);
-        continue;
-      }
+      const string param_type = type_annotations[param_and_default.first.GetName()];
+      strings::StrAppend(&parameters, param_and_default.first.GetRenameTo(), ": ", param_type);
+      strings::StrAppend(&parameters_with_defaults,
+                         param_and_default.first.GetRenameTo(), ": ",
+                         param_type, " = ", param_and_default.second);
+      continue;
    }
 
     strings::StrAppend(&parameters, param_and_default.first.GetRenameTo());
@@ -425,7 +425,7 @@ string GenEagerPythonOp::Code() {
   }
 
   if (!AddEagerFallbackCode(parameters, output_sizes, num_outputs_expr,
-                            eager_not_allowed_error)) {
+                            eager_not_allowed_error, type_annotations)) {
     return result_;
   }
 
@@ -449,35 +449,29 @@ std::unordered_map<string, string> GenEagerPythonOp::GetTypeAnnotationMap() {
   for (const auto& arg : op_def_.input_arg()) {
     // Do not add type annotations to args that accept a sequence of Tensors
     if (!arg.number_attr().empty()) continue;
-    string type_annotation;
     if (type_annotations.find(arg.type_attr()) != type_annotations.end()) {
       // Get the correct TypeVar if input maps to an attr
-      strings::StrAppend(&type_annotation, "_ops.Tensor[", type_annotations[arg.type_attr()], "]");
+      type_annotations[arg.name()] = "_ops.Tensor[" + type_annotations[arg.type_attr()] + "]";
     } else {
       // Get the dtype of the Tensor
       const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
       if (dtype_type.find(py_dtype) != dtype_type.end()) {
-        strings::StrAppend(&type_annotation, "_ops.Tensor[", dtype_type[py_dtype], "]");
+        type_annotations[arg.name()] = "_ops.Tensor[" + dtype_type[py_dtype] + "]";
      }
    }
-
-    type_annotations[arg.name()] = type_annotation;
   }
 
   // Mapping output Tensor to its type
   if (op_def_.output_arg_size() == 1) {
     const auto& arg = op_def_.output_arg(0);
-    string type_annotation;
     if (type_annotations.find(arg.type_attr()) != type_annotations.end()) {
-      strings::StrAppend(&type_annotation, "_ops.Tensor[", type_annotations[arg.type_attr()], "]");
+      type_annotations[arg.name()] = "_ops.Tensor[" + type_annotations[arg.type_attr()] + "]";
     } else {
       const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
       if (dtype_type.find(py_dtype) != dtype_type.end()) {
-        strings::StrAppend(&type_annotation, "_ops.Tensor[", dtype_type[py_dtype], "]");
+        type_annotations[arg.name()] = "_ops.Tensor[" + dtype_type[py_dtype] + "]";
      }
    }
-
-    type_annotations[arg.name()] = type_annotation;
   }
 
   return type_annotations;
@@ -515,19 +509,20 @@ void GenEagerPythonOp::GenerateTypeVars(std::unordered_map<string, string>& type_annotations) {
   if (added_typevar) strings::StrAppend(&result_, "\n");
 }
 
+// TODO(rahulkamat): Modify AddDefLine() to add return type annotation
 void GenEagerPythonOp::AddReturnTypeAnnotation(std::unordered_map<string, string>& type_annotations) {
   if (op_def_.output_arg_size() == 1) {
     const auto& arg = op_def_.output_arg(0);
     // Add type annotations to param
     if (type_annotations.find(arg.name()) != type_annotations.end()) {
-      if (!type_annotations[arg.name()].empty()) {
-        result_.erase(result_.length() - 2);
-        strings::StrAppend(&result_, " -> ", type_annotations[arg.name()], ":\n");
-      }
+      result_.erase(result_.length() - 2);
+      strings::StrAppend(&result_, " -> ", type_annotations[arg.name()], ":\n");
    }
   }
 }
 
+
+
 void GenEagerPythonOp::HandleGraphMode(
     const string& function_setup, const std::vector<string>& output_sizes) {
   strings::StrAppend(&result_, "  # Add nodes to the TensorFlow graph.\n");
@@ -903,11 +898,14 @@ bool GenEagerPythonOp::AddEagerFastPathAndGraphCode(
 
 bool GenEagerPythonOp::AddEagerFallbackCode(
     const string& parameters, const std::vector<string>& output_sizes,
-    const string& num_outputs_expr, const string& eager_not_allowed_error) {
+    const string& num_outputs_expr, const string& eager_not_allowed_error,
+    std::unordered_map<string, string>& type_annotations) {
   AddDefLine(
       strings::StrCat(function_name_, kEagerFallbackSuffix),
       strings::StrCat(parameters, parameters.empty() ? "" : ", ", "ctx"));
-
+  if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
+    AddReturnTypeAnnotation(type_annotations);
+  }
   if (!eager_not_allowed_error.empty()) {
     strings::StrAppend(&result_, "  ", eager_not_allowed_error);
     return true;
  }
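(Illustration, not part of the patch: the eager-fallback wrapper now gets the same annotations. For the synthetic Foo op, the tests added in PATCH 12 expect

    def foo_eager_fallback(x: _ops.Tensor[TV_Foo_T], y: _ops.Tensor[TV_Foo_T2], name, ctx) -> _ops.Tensor[TV_Foo_T]:

note that name and ctx stay unannotated.)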
From 142ddd38a5cc4acf6f6880b64751376267ab527a Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Mon, 22 Jun 2020 03:34:21 +0000
Subject: [PATCH 10/19] Extract finding arg annotation logic into helper
 function

---
 tensorflow/python/framework/python_op_gen.cc | 37 +++++++++-----------
 tensorflow/python/framework/python_op_gen.h  |  6 ++++
 2 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 116d86017be..42fb88bec88 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -449,29 +449,13 @@ std::unordered_map<string, string> GenEagerPythonOp::GetTypeAnnotationMap() {
   for (const auto& arg : op_def_.input_arg()) {
     // Do not add type annotations to args that accept a sequence of Tensors
     if (!arg.number_attr().empty()) continue;
-    if (type_annotations.find(arg.type_attr()) != type_annotations.end()) {
-      // Get the correct TypeVar if input maps to an attr
-      type_annotations[arg.name()] = "_ops.Tensor[" + type_annotations[arg.type_attr()] + "]";
-    } else {
-      // Get the dtype of the Tensor
-      const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
-      if (dtype_type.find(py_dtype) != dtype_type.end()) {
-        type_annotations[arg.name()] = "_ops.Tensor[" + dtype_type[py_dtype] + "]";
-      }
-    }
+    type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations);
   }
 
   // Mapping output Tensor to its type
   if (op_def_.output_arg_size() == 1) {
     const auto& arg = op_def_.output_arg(0);
-    if (type_annotations.find(arg.type_attr()) != type_annotations.end()) {
-      type_annotations[arg.name()] = "_ops.Tensor[" + type_annotations[arg.type_attr()] + "]";
-    } else {
-      const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
-      if (dtype_type.find(py_dtype) != dtype_type.end()) {
-        type_annotations[arg.name()] = "_ops.Tensor[" + dtype_type[py_dtype] + "]";
-      }
-    }
+    type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations);
   }
 
   return type_annotations;
@@ -527,8 +511,6 @@ void GenEagerPythonOp::AddReturnTypeAnnotation(std::unordered_map<string, string>& type_annotations) {
   }
 }
 
-
-
 void GenEagerPythonOp::HandleGraphMode(
     const string& function_setup, const std::vector<string>& output_sizes) {
   strings::StrAppend(&result_, "  # Add nodes to the TensorFlow graph.\n");
@@ -1262,4 +1244,19 @@ string GetPythonWrappers(const char* op_list_buf, size_t op_list_len) {
   return GetPythonOpsImpl(ops, api_def_map, {});
 }
 
+string GetArgAnnotation(const auto& arg, std::unordered_map<string, string>& type_annotations) {
+  if (type_annotations.find(arg.type_attr()) != type_annotations.end()) {
+    // Get the correct TypeVar if arg maps to an attr
+    return "_ops.Tensor[" + type_annotations[arg.type_attr()] + "]";
+  } else {
+    // Get the dtype of the Tensor
+    const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
+    if (dtype_type.find(py_dtype) != dtype_type.end()) {
+      return "_ops.Tensor[" + dtype_type[py_dtype] + "]";
+    }
+  }
+
+  return "Any";
+}
+
 }  // namespace tensorflow
diff --git a/tensorflow/python/framework/python_op_gen.h b/tensorflow/python/framework/python_op_gen.h
index f1cd6e49013..8d5eb68bed1 100644
--- a/tensorflow/python/framework/python_op_gen.h
+++ b/tensorflow/python/framework/python_op_gen.h
@@ -49,6 +49,12 @@ void PrintPythonOps(const OpList& ops, const ApiDefMap& api_defs,
 // length of that buffer.
 string GetPythonWrappers(const char* op_list_buf, size_t op_list_len);
 
+// Get the type annotation for an arg
+// `arg` should be an input or output of an op
+// `type_annotations` should contain attr names mapped to TypeVar names
+string GetArgAnnotation(const auto& arg,
+                        std::unordered_map<string, string>& type_annotations);
+
 }  // namespace tensorflow
 
 #endif  // TENSORFLOW_PYTHON_FRAMEWORK_PYTHON_OP_GEN_H_
From 7789ccd70067ac288dbe12a5ab525588a32f3437 Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Tue, 23 Jun 2020 00:18:56 +0000
Subject: [PATCH 11/19] Pass whitelist op set from op gen main

---
 tensorflow/python/framework/python_op_gen.cc  | 37 ++++++++++---------
 tensorflow/python/framework/python_op_gen.h   |  6 ++-
 .../framework/python_op_gen_internal.cc       |  3 +-
 .../python/framework/python_op_gen_internal.h |  3 +-
 .../python/framework/python_op_gen_main.cc    | 20 +++++++---
 5 files changed, 41 insertions(+), 28 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 42fb88bec88..0b6f974d962 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -72,10 +72,6 @@ std::unordered_map<string, string> dtype_type {
   {"_dtypes.variant", "_dtypes.Variant"}
 };
 
-// Add op name to this set to add type annotations
-std::unordered_set<string> type_annotate_ops {
-};
-
 string AttrVarName(const string& attr_name,
                    std::unordered_map<string, string>* attr_expressions) {
   const string var = strings::StrCat("_attr_", attr_name);
@@ -137,8 +133,8 @@ string TensorPBString(const TensorProto& pb) {
 class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
  public:
   GenEagerPythonOp(const OpDef& op_def, const ApiDef& api_def,
-                   const string& function_name)
-      : python_op_gen_internal::GenPythonOp(op_def, api_def, function_name) {
+                   const string& function_name, const bool type_annotate_op)
+      : python_op_gen_internal::GenPythonOp(op_def, api_def, function_name, type_annotate_op) {
     op_name_ = function_name_;
     absl::ConsumePrefix(&op_name_, "_");
   }
@@ -218,8 +214,8 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp {
 };
 
 string GetEagerPythonOp(const OpDef& op_def, const ApiDef& api_def,
-                        const string& function_name) {
-  return GenEagerPythonOp(op_def, api_def, function_name).Code();
+                        const string& function_name, const bool type_annotate_op) {
+  return GenEagerPythonOp(op_def, api_def, function_name, type_annotate_op).Code();
 }
 
@@ -351,7 +347,7 @@ string GenEagerPythonOp::Code() {
   std::unordered_map<string, string> type_annotations;
   // Only populate map for whitelisted ops
-  if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
+  if (type_annotate_op_) {
     type_annotations = GetTypeAnnotationMap();
   }
 
@@ -834,7 +830,7 @@ void GenEagerPythonOp::AddEagerFunctionTeardown(
 bool GenEagerPythonOp::AddEagerFastPathAndGraphCode(
     const string& parameters, const std::vector<string>& output_sizes,
     const string& eager_not_allowed_error, std::unordered_map<string, string>& type_annotations) {
-  if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
+  if (type_annotate_op_) {
     GenerateTypeVars(type_annotations);
   }
   if (api_def_.visibility() == ApiDef::VISIBLE) {
@@ -843,7 +839,7 @@ bool GenEagerPythonOp::AddEagerFastPathAndGraphCode(
 
   AddExport();
   AddDefLine(function_name_, parameters);
-  if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
+  if (type_annotate_op_) {
     AddReturnTypeAnnotation(type_annotations);
   }
   AddDocStringDescription();
@@ -885,7 +881,7 @@ bool GenEagerPythonOp::AddEagerFallbackCode(
   AddDefLine(
       strings::StrCat(function_name_, kEagerFallbackSuffix),
       strings::StrCat(parameters, parameters.empty() ? "" : ", ", "ctx"));
-  if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) {
+  if (type_annotate_op_) {
     AddReturnTypeAnnotation(type_annotations);
   }
   if (!eager_not_allowed_error.empty()) {
@@ -1136,7 +1132,8 @@ void GenEagerPythonOp::AddRawOpExport(const string& parameters) {
 
 string GetPythonOpsImpl(const OpList& ops, const ApiDefMap& api_defs,
                         const std::vector<string>& hidden_ops,
-                        const string& source_file_name = "") {
+                        const string& source_file_name = "",
+                        std::unordered_set<string> type_annotate_ops = {}) {
   string result;
   // Header
   // TODO(josh11b): Mention the library for which wrappers are being generated.
@@ -1214,8 +1211,10 @@ from typing import TypeVar
       continue;
     }
 
+    const bool type_annotate_op = type_annotate_ops.find(op_def.name()) != type_annotate_ops.end();
+
     strings::StrAppend(&result,
-                       GetEagerPythonOp(op_def, *api_def, function_name));
+                       GetEagerPythonOp(op_def, *api_def, function_name, type_annotate_op));
   }
 
   return result;
@@ -1225,15 +1224,17 @@ from typing import TypeVar
 
 string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs,
                     const std::vector<string>& hidden_ops,
-                    const string& source_file_name) {
-  return GetPythonOpsImpl(ops, api_defs, hidden_ops, source_file_name);
+                    const string& source_file_name,
+                    std::unordered_set<string> type_annotate_ops) {
+  return GetPythonOpsImpl(ops, api_defs, hidden_ops, source_file_name, type_annotate_ops);
 }
 
 void PrintPythonOps(const OpList& ops, const ApiDefMap& api_defs,
                     const std::vector<string>& hidden_ops,
-                    const string& source_file_name) {
+                    const string& source_file_name,
+                    std::unordered_set<string> type_annotate_ops) {
   printf("%s",
-         GetPythonOpsImpl(ops, api_defs, hidden_ops, source_file_name).c_str());
+         GetPythonOpsImpl(ops, api_defs, hidden_ops, source_file_name, type_annotate_ops).c_str());
 }
 
 string GetPythonWrappers(const char* op_list_buf, size_t op_list_len) {
diff --git a/tensorflow/python/framework/python_op_gen.h b/tensorflow/python/framework/python_op_gen.h
index 8d5eb68bed1..1a3b6c5e8f2 100644
--- a/tensorflow/python/framework/python_op_gen.h
+++ b/tensorflow/python/framework/python_op_gen.h
@@ -32,7 +32,8 @@ namespace tensorflow {
 // file where the ops' REGISTER_OP() calls reside.
 string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs,
                     const std::vector<string>& hidden_ops,
-                    const string& source_file_name);
+                    const string& source_file_name,
+                    std::unordered_set<string> type_annotate_ops);
 
 // Prints the output of GetPrintOps to stdout.
 // hidden_ops should be a list of Op names that should get a leading _
@@ -41,7 +42,8 @@ string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs,
 // where the ops' REGISTER_OP() calls reside.
 void PrintPythonOps(const OpList& ops, const ApiDefMap& api_defs,
                     const std::vector<string>& hidden_ops,
-                    const string& source_file_name);
+                    const string& source_file_name,
+                    std::unordered_set<string> type_annotate_ops);
 
 // Get the python wrappers for a list of ops in a OpList.
 // `op_list_buf` should be a pointer to a buffer containing
diff --git a/tensorflow/python/framework/python_op_gen_internal.cc b/tensorflow/python/framework/python_op_gen_internal.cc
index 05102db0189..d0ef82857c4 100644
--- a/tensorflow/python/framework/python_op_gen_internal.cc
+++ b/tensorflow/python/framework/python_op_gen_internal.cc
@@ -513,10 +513,11 @@ const ApiDef::Attr* FindAttr(StringPiece name, const ApiDef& api_def) {
 }
 
 GenPythonOp::GenPythonOp(const OpDef& op_def, const ApiDef& api_def,
-                         const string& function_name)
+                         const string& function_name, const bool type_annotate_op)
     : op_def_(op_def),
       api_def_(api_def),
       function_name_(function_name),
+      type_annotate_op_(type_annotate_op),
       num_outs_(op_def.output_arg_size()) {}
 
 GenPythonOp::~GenPythonOp() {}
diff --git a/tensorflow/python/framework/python_op_gen_internal.h b/tensorflow/python/framework/python_op_gen_internal.h
index 094eb702438..5229bffc5d0 100644
--- a/tensorflow/python/framework/python_op_gen_internal.h
+++ b/tensorflow/python/framework/python_op_gen_internal.h
@@ -71,7 +71,7 @@ class ParamNames {
 class GenPythonOp {
  public:
   GenPythonOp(const OpDef& op_def, const ApiDef& api_def,
-              const string& function_name);
+              const string& function_name, const bool type_annotate_op_);
   virtual ~GenPythonOp();
 
   virtual string Code();
@@ -98,6 +98,7 @@ class GenPythonOp {
   const OpDef& op_def_;
   const ApiDef& api_def_;
   const string function_name_;
+  const bool type_annotate_op_;
   const int num_outs_;
 
   // Return value from Code() is prelude_ + result_.
"" : ", ", "ctx")); - + if (type_annotate_ops.find(op_def_.name()) != type_annotate_ops.end()) { + AddReturnTypeAnnotation(type_annotations); + } if (!eager_not_allowed_error.empty()) { strings::StrAppend(&result_, " ", eager_not_allowed_error); return true; From 142ddd38a5cc4acf6f6880b64751376267ab527a Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Mon, 22 Jun 2020 03:34:21 +0000 Subject: [PATCH 10/19] Extract finding arg annotation logic into helper function --- tensorflow/python/framework/python_op_gen.cc | 37 +++++++++----------- tensorflow/python/framework/python_op_gen.h | 6 ++++ 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc index 116d86017be..42fb88bec88 100644 --- a/tensorflow/python/framework/python_op_gen.cc +++ b/tensorflow/python/framework/python_op_gen.cc @@ -449,29 +449,13 @@ std::unordered_map GenEagerPythonOp::GetTypeAnnotationMap() { for (const auto& arg : op_def_.input_arg()) { // Do not add type annotations to args that accept a sequence of Tensors if (!arg.number_attr().empty()) continue; - if (type_annotations.find(arg.type_attr()) != type_annotations.end()) { - // Get the correct TypeVar if input maps to an attr - type_annotations[arg.name()] = "_ops.Tensor[" + type_annotations[arg.type_attr()] + "]"; - } else { - // Get the dtype of the Tensor - const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes."); - if (dtype_type.find(py_dtype) != dtype_type.end()) { - type_annotations[arg.name()] = "_ops.Tensor[" + dtype_type[py_dtype] + "]"; - } - } + type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations); } // Mapping output Tensor to its type if (op_def_.output_arg_size() == 1) { const auto& arg = op_def_.output_arg(0); - if (type_annotations.find(arg.type_attr()) != type_annotations.end()) { - type_annotations[arg.name()] = "_ops.Tensor[" + type_annotations[arg.type_attr()] + "]"; - } else { - const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes."); - if (dtype_type.find(py_dtype) != dtype_type.end()) { - type_annotations[arg.name()] = "_ops.Tensor[" + dtype_type[py_dtype] + "]"; - } - } + type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations); } return type_annotations; @@ -527,8 +511,6 @@ void GenEagerPythonOp::AddReturnTypeAnnotation(std::unordered_map& output_sizes) { strings::StrAppend(&result_, " # Add nodes to the TensorFlow graph.\n"); @@ -1262,4 +1244,19 @@ string GetPythonWrappers(const char* op_list_buf, size_t op_list_len) { return GetPythonOpsImpl(ops, api_def_map, {}); } +string GetArgAnnotation(const auto& arg, std::unordered_map& type_annotations) { + if (type_annotations.find(arg.type_attr()) != type_annotations.end()) { + // Get the correct TypeVar if arg maps to an attr + return "_ops.Tensor[" + type_annotations[arg.type_attr()] + "]"; + } else { + // Get the dtype of the Tensor + const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes."); + if (dtype_type.find(py_dtype) != dtype_type.end()) { + return "_ops.Tensor[" + dtype_type[py_dtype] + "]"; + } + } + + return "Any"; +} + } // namespace tensorflow diff --git a/tensorflow/python/framework/python_op_gen.h b/tensorflow/python/framework/python_op_gen.h index f1cd6e49013..8d5eb68bed1 100644 --- a/tensorflow/python/framework/python_op_gen.h +++ b/tensorflow/python/framework/python_op_gen.h @@ -49,6 +49,12 @@ void PrintPythonOps(const OpList& ops, const ApiDefMap& 
From 8bec96c5a8a14239a4b607e5e4c4ba1c5a9971f8 Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Tue, 23 Jun 2020 00:44:39 +0000
Subject: [PATCH 12/19] Add tests to verify type annotations are correctly
 added

---
 .../python/framework/python_op_gen_test.cc | 251 +++++++++++++++++-
 1 file changed, 249 insertions(+), 2 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen_test.cc b/tensorflow/python/framework/python_op_gen_test.cc
index 5185086fdd3..2561195c407 100644
--- a/tensorflow/python/framework/python_op_gen_test.cc
+++ b/tensorflow/python/framework/python_op_gen_test.cc
@@ -20,22 +20,269 @@ limitations under the License.
 #include "tensorflow/core/framework/op_gen_lib.h"
 #include "tensorflow/core/platform/test.h"
 
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+
 namespace tensorflow {
 namespace {
 
+constexpr char kBaseOpDef[] = R"(
+op {
+  name: "Foo"
+  input_arg {
+    name: "x"
+    type_attr: "T"
+  }
+  input_arg {
+    name: "y"
+    type_attr: "T2"
+  }
+  output_arg {
+    name: "output"
+    type_attr: "T"
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_UINT8
+        type: DT_INT8
+      }
+    }
+  }
+  attr {
+    name: "T2"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_STRING
+        type: DT_FLOAT
+        type: DT_DOUBLE
+      }
+    }
+  }
+  summary: "Summary for op Foo."
+  description: "Description for op Foo."
+}
+op {
+  name: "Bar"
+  input_arg {
+    name: "x"
+    type: DT_STRING
+  }
+  input_arg {
+    name: "y"
+    type: DT_QINT8
+  }
+  output_arg {
+    name: "output"
+    type: DT_BOOL
+  }
+  summary: "Summary for op Bar."
+  description: "Description for op Bar."
+}
+op {
+  name: "FooBar"
+  input_arg {
+    name: "x"
+    type: DT_FLOAT
+  }
+  output_arg {
+    name: "output"
+    type: DT_BOOL
+  }
+  attr {
+    name: "t"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_HALF
+        type: DT_INT8
+      }
+    }
+  }
+  attr {
+    name: "var1"
+    type: "bool"
+    default_value {
+      b: false
+    }
+  }
+  attr {
+    name: "var2"
+    type: "int"
+    default_value {
+      i: 0
+    }
+  }
+  summary: "Summary for op FooBar."
+  description: "Description for op FooBar."
+}
+op {
+  name: "Baz"
+  input_arg {
+    name: "inputs"
+    number_attr: "N"
+    type_list_attr: "T"
+  }
+  output_arg {
+    name: "output1"
+    type: DT_BOOL
+  }
+  output_arg {
+    name: "output2"
+    type: DT_BOOL
+  }
+  attr {
+    name: "T"
+    type: "bool"
+  }
+  attr {
+    name: "N"
+    type: "int"
+  }
+  summary: "Summary for op Baz."
+  description: "Description for op Baz."
+}
+)";
+
+std::unordered_set<string> type_annotate_ops {
+  "Foo",
+  "Bar",
+  "FooBar",
+  "Baz"
+};
+
+
+void ExpectHasSubstr(const string& s, const string& expected) {
+  EXPECT_TRUE(absl::StrContains(s, expected))
+      << "'Generated ops does not contain '" << expected << "'";
+}
+
+void ExpectDoesNotHaveSubstr(const string& s, const string& expected) {
+  EXPECT_FALSE(absl::StrContains(s, expected))
+      << "'Generated ops contains '" << expected << "'";
+}
+
+void ExpectSubstrOrder(const string& s, const string& before,
+                       const string& after) {
+  int before_pos = s.find(before);
+  int after_pos = s.find(after);
+  ASSERT_NE(std::string::npos, before_pos);
+  ASSERT_NE(std::string::npos, after_pos);
+  EXPECT_LT(before_pos, after_pos)
+      << before << "' is not before '" << after;
+}
 TEST(PythonOpGen, Basic) {
   OpList ops;
   OpRegistry::Global()->Export(false, &ops);
 
   ApiDefMap api_def_map(ops);
 
-  string code = GetPythonOps(ops, api_def_map, {}, "");
+  string code = GetPythonOps(ops, api_def_map, {}, "", {});
 
   EXPECT_TRUE(absl::StrContains(code, "def case"));
-
-  // TODO(mdan): Add tests to verify type annotations are correctly added.
 }
 
+TEST(PythonOpGen, TypeAnnotateSingleTypeTensor) {
+  OpList op_defs;
+  OpRegistry::Global()->Export(false, &op_defs);
+  protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);  // NOLINT
+  ApiDefMap api_def_map(op_defs);
+
+  string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops);
+
+  const string typed_bar = "def bar(x: _ops.Tensor[_dtypes.String], y: _ops.Tensor[_dtypes.QInt8], name=None) -> _ops.Tensor[_dtypes.Bool]:";
+  ExpectHasSubstr(code, typed_bar);
+
+  const string untyped_bar = "def bar(x, y, name=None):";
+  ExpectDoesNotHaveSubstr(code, untyped_bar);
+}
+
+TEST(PythonOpGen, TypeAnnotateMultiTypeTensor) {
+  OpList op_defs;
+  OpRegistry::Global()->Export(false, &op_defs);
+  protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);  // NOLINT
+  ApiDefMap api_def_map(op_defs);
+
+  string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops);
+
+  const string typed_foo = "def foo(x: _ops.Tensor[TV_Foo_T], y: _ops.Tensor[TV_Foo_T2], name=None) -> _ops.Tensor[TV_Foo_T]:";
+  ExpectHasSubstr(code, typed_foo);
+}
+
+TEST(PythonOpGen, GenerateCorrectTypeVars) {
+  OpList op_defs;
+  OpRegistry::Global()->Export(false, &op_defs);
+  protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);  // NOLINT
+  ApiDefMap api_def_map(op_defs);
+
+  string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops);
+
+  const string typevars_foo = R"(
+TV_Foo_T = TypeVar("TV_Foo_T", _dtypes.Int8, _dtypes.UInt8)
+TV_Foo_T2 = TypeVar("TV_Foo_T2", _dtypes.Float32, _dtypes.Float64, _dtypes.String)
+)";
+
+  ExpectHasSubstr(code, typevars_foo);
+}
"1", + type_annotate_ops); } else { return -1; } From 8bec96c5a8a14239a4b607e5e4c4ba1c5a9971f8 Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Tue, 23 Jun 2020 00:44:39 +0000 Subject: [PATCH 12/19] Add tests to verify type annotations are correctly added --- .../python/framework/python_op_gen_test.cc | 251 +++++++++++++++++- 1 file changed, 249 insertions(+), 2 deletions(-) diff --git a/tensorflow/python/framework/python_op_gen_test.cc b/tensorflow/python/framework/python_op_gen_test.cc index 5185086fdd3..2561195c407 100644 --- a/tensorflow/python/framework/python_op_gen_test.cc +++ b/tensorflow/python/framework/python_op_gen_test.cc @@ -20,22 +20,269 @@ limitations under the License. #include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/platform/test.h" +#include "tensorflow/core/lib/io/path.h" +#include "tensorflow/core/lib/strings/str_util.h" + namespace tensorflow { namespace { +constexpr char kBaseOpDef[] = R"( +op { + name: "Foo" + input_arg { + name: "x" + type_attr: "T" + } + input_arg { + name: "y" + type_attr: "T2" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_UINT8 + type: DT_INT8 + } + } + } + attr { + name: "T2" + type: "type" + allowed_values { + list { + type: DT_STRING + type: DT_FLOAT + type: DT_DOUBLE + } + } + } + summary: "Summary for op Foo." + description: "Description for op Foo." +} +op { + name: "Bar" + input_arg { + name: "x" + type: DT_STRING + } + input_arg { + name: "y" + type: DT_QINT8 + } + output_arg { + name: "output" + type: DT_BOOL + } + summary: "Summary for op Bar." + description: "Description for op Bar." +} +op { + name: "FooBar" + input_arg { + name: "x" + type: DT_FLOAT + } + output_arg { + name: "output" + type: DT_BOOL + } + attr { + name: "t" + type: "type" + allowed_values { + list { + type: DT_HALF + type: DT_INT8 + } + } + } + attr { + name: "var1" + type: "bool" + default_value { + b: false + } + } + attr { + name: "var2" + type: "int" + default_value { + i: 0 + } + } + summary: "Summary for op FooBar." + description: "Description for op FooBar." +} +op { + name: "Baz" + input_arg { + name: "inputs" + number_attr: "N" + type_list_attr: "T" + } + output_arg { + name: "output1" + type: DT_BOOL + } + output_arg { + name: "output2" + type: DT_BOOL + } + attr { + name: "T" + type: "bool" + } + attr { + name: "N" + type: "int" + } + summary: "Summary for op Baz." + description: "Description for op Baz." 
From 01fdbb866b4350acd74370d0a4b182feb4a056d8 Mon Sep 17 00:00:00 2001
From: rahul-kamat
Date: Tue, 23 Jun 2020 02:46:05 +0000
Subject: [PATCH 13/19] Remove imports from python_op_gen_test

---
 tensorflow/python/framework/python_op_gen_test.cc | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/tensorflow/python/framework/python_op_gen_test.cc b/tensorflow/python/framework/python_op_gen_test.cc
index 2561195c407..cf6566ea7ae 100644
--- a/tensorflow/python/framework/python_op_gen_test.cc
+++ b/tensorflow/python/framework/python_op_gen_test.cc
@@ -20,9 +20,6 @@ limitations under the License.
 #include "tensorflow/core/framework/op_gen_lib.h"
 #include "tensorflow/core/platform/test.h"
 
-#include "tensorflow/core/lib/io/path.h"
-#include "tensorflow/core/lib/strings/str_util.h"
-
 namespace tensorflow {
 namespace {
string typevar_foo = "TV_Foo_"; + const string def_foo = "def foo"; + ExpectSubstrOrder(code, typevar_foo, def_foo); +} + + +TEST(PythonOpGen, TypeAnnotateDefaultParams) { + OpList op_defs; + OpRegistry::Global()->Export(false, &op_defs); + protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT + ApiDefMap api_def_map(op_defs); + + string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); + + const string params = "def foo_bar(x: _ops.Tensor[_dtypes.Float32], t: TV_FooBar_t, var1: bool = False, var2: int = 0, name=None)"; + const string params_fallback = "def foo_bar_eager_fallback(x: _ops.Tensor[_dtypes.Float32], t: TV_FooBar_t, var1: bool, var2: int, name, ctx)"; + ExpectHasSubstr(code, params); + ExpectHasSubstr(code, params_fallback); +} + +TEST(PythonOpGen, NoTypingSequenceTensors) { + OpList op_defs; + OpRegistry::Global()->Export(false, &op_defs); + protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT + ApiDefMap api_def_map(op_defs); + + string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); + + const string baz_def_line = "def baz(inputs, name=None):"; + + ExpectHasSubstr(code, baz_def_line); +} + // TODO(mdan): Include more tests with synhtetic ops and api defs. } // namespace From 01fdbb866b4350acd74370d0a4b182feb4a056d8 Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Tue, 23 Jun 2020 02:46:05 +0000 Subject: [PATCH 13/19] Remove imports from python_op_gen_test --- tensorflow/python/framework/python_op_gen_test.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/tensorflow/python/framework/python_op_gen_test.cc b/tensorflow/python/framework/python_op_gen_test.cc index 2561195c407..cf6566ea7ae 100644 --- a/tensorflow/python/framework/python_op_gen_test.cc +++ b/tensorflow/python/framework/python_op_gen_test.cc @@ -20,9 +20,6 @@ limitations under the License. 
#include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/platform/test.h" -#include "tensorflow/core/lib/io/path.h" -#include "tensorflow/core/lib/strings/str_util.h" - namespace tensorflow { namespace { From 7f0e00817fe3c5c090dfb748137120f5a1ae0261 Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Tue, 23 Jun 2020 23:24:20 +0000 Subject: [PATCH 14/19] PR review changes --- tensorflow/python/framework/python_op_gen.cc | 117 +++++++++--------- tensorflow/python/framework/python_op_gen.h | 6 +- .../framework/python_op_gen_internal.cc | 4 +- .../python/framework/python_op_gen_internal.h | 4 +- .../python/framework/python_op_gen_main.cc | 4 +- .../python/framework/python_op_gen_test.cc | 2 +- 6 files changed, 67 insertions(+), 70 deletions(-) diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc index 0b6f974d962..b6e39a4df00 100644 --- a/tensorflow/python/framework/python_op_gen.cc +++ b/tensorflow/python/framework/python_op_gen.cc @@ -45,7 +45,8 @@ const int kRightMargin = 78; constexpr char kEagerFallbackSuffix[] = "_eager_fallback"; -std::unordered_map dtype_type { +// Dtype enums mapped to dtype classes which is the type of each dtype +const std::unordered_map dtype_type { {"_dtypes.float16", "_dtypes.Float16"}, {"_dtypes.half", "_dtypes.Half"}, {"_dtypes.float32", "_dtypes.Float32"}, @@ -133,8 +134,8 @@ string TensorPBString(const TensorProto& pb) { class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp { public: GenEagerPythonOp(const OpDef& op_def, const ApiDef& api_def, - const string& function_name, const bool type_annotate_op) - : python_op_gen_internal::GenPythonOp(op_def, api_def, function_name, type_annotate_op) { + const string& function_name, bool add_type_annotations) + : python_op_gen_internal::GenPythonOp(op_def, api_def, function_name, add_type_annotations) { op_name_ = function_name_; absl::ConsumePrefix(&op_name_, "_"); } @@ -160,12 +161,12 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp { bool AddEagerFastPathAndGraphCode(const string& parameters, const std::vector& output_sizes, const string& eager_not_allowed_error, - std::unordered_map& type_annotations); + const std::unordered_map& type_annotations); bool AddEagerFallbackCode(const string& parameters, const std::vector& output_sizes, const string& num_outputs_expr, const string& eager_not_allowed_error, - std::unordered_map& type_annotations); + const std::unordered_map& type_annotations); void AddEagerFastPathExecute(); void AddEagerInferredAttrs(const string& indentation); @@ -177,11 +178,11 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp { void AddRawOpExport(const string& parameters); - std::unordered_map GetTypeAnnotationMap(); + std::unordered_map GetTypeAnnotations(); - void GenerateTypeVars(std::unordered_map& type_annotations); + void GenerateTypeVars(const std::unordered_map& type_annotations); - void AddReturnTypeAnnotation(std::unordered_map& type_annotations); + void AddReturnTypeAnnotation(const std::unordered_map& type_annotations); void AddAttrForArg(const string& attr, int arg_index) { gtl::InsertIfNotPresent(&inferred_attrs_, attr, @@ -214,8 +215,8 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp { }; string GetEagerPythonOp(const OpDef& op_def, const ApiDef& api_def, - const string& function_name, const bool type_annotate_op) { - return GenEagerPythonOp(op_def, api_def, function_name, type_annotate_op).Code(); + const string& function_name, bool 
add_type_annotations) { + return GenEagerPythonOp(op_def, api_def, function_name, add_type_annotations).Code(); } string GenEagerPythonOp::FlattenInputs( @@ -347,8 +348,8 @@ string GenEagerPythonOp::Code() { std::unordered_map type_annotations; // Only populate map for whitelisted ops - if (type_annotate_op_) { - type_annotations = GetTypeAnnotationMap(); + if (add_type_annotations_) { + type_annotations = GetTypeAnnotations(); } string parameters; @@ -357,33 +358,28 @@ string GenEagerPythonOp::Code() { if (!parameters.empty()) strings::StrAppend(¶meters, ", "); strings::StrAppend(¶meters, param.GetRenameTo()); - // Add type annotations to param if (type_annotations.find(param.GetName()) != type_annotations.end()) { - strings::StrAppend(¶meters, ": ", type_annotations[param.GetName()]); + strings::StrAppend(¶meters, ": ", type_annotations.at(param.GetName())); } } - // Append to parameters and parameters_with_defaults because multiple functions - // are generated (op and fallback op) string parameters_with_defaults = parameters; for (const auto& param_and_default : params_with_default_) { if (!parameters.empty()) strings::StrAppend(¶meters, ", "); if (!parameters_with_defaults.empty()) strings::StrAppend(¶meters_with_defaults, ", "); - // Add type annotations to param_and_default + strings::StrAppend(¶meters, param_and_default.first.GetRenameTo()); + strings::StrAppend(¶meters_with_defaults, param_and_default.first.GetRenameTo()); if (type_annotations.find(param_and_default.first.GetName()) != type_annotations.end()) { - const string param_type = type_annotations[param_and_default.first.GetName()]; - strings::StrAppend(¶meters, param_and_default.first.GetRenameTo(), ": ", param_type); - strings::StrAppend(¶meters_with_defaults, - param_and_default.first.GetRenameTo(), ": ", - param_type, " = ", param_and_default.second); - continue; + const string param_type = type_annotations.at(param_and_default.first.GetName()); + // Append to parameters and parameters_with_defaults because multiple functions + // are generated by AddEagerFastPathAndGraphCode() and AddEagerFallbackCode() + strings::StrAppend(¶meters, ": ", param_type); + strings::StrAppend(¶meters_with_defaults, ":", param_type); } - strings::StrAppend(¶meters, param_and_default.first.GetRenameTo()); - strings::StrAppend(¶meters_with_defaults, - param_and_default.first.GetRenameTo(), "=", + strings::StrAppend(¶meters_with_defaults, "=", param_and_default.second); } @@ -428,9 +424,9 @@ string GenEagerPythonOp::Code() { return prelude_ + result_; } -std::unordered_map GenEagerPythonOp::GetTypeAnnotationMap() { +std::unordered_map GenEagerPythonOp::GetTypeAnnotations() { std::unordered_map type_annotations; - // Mapping attrs to TypeVars + // Map attrs to TypeVars for (const auto& attr : op_def_.attr()) { if (attr.type() == "type") { const string type_var_name = "TV_" + op_def_.name() + "_" + attr.name(); @@ -441,24 +437,26 @@ std::unordered_map GenEagerPythonOp::GetTypeAnnotationMap() { } } - // Mapping input Tensors to their types + // Map input Tensors to their types for (const auto& arg : op_def_.input_arg()) { - // Do not add type annotations to args that accept a sequence of Tensors - if (!arg.number_attr().empty()) continue; + // TODO(rahulkamat): Add type annotations to args that accept a sequence of Tensors + if (!arg.number_attr().empty() || !arg.type_list_attr().empty()) continue; type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations); } - // Mapping output Tensor to its type + // TODO(rahulkamat): Add type 
annotations to handle return types of a sequence of Tensors. + // Map output Tensor to its type if (op_def_.output_arg_size() == 1) { const auto& arg = op_def_.output_arg(0); - type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations); + if (arg.number_attr().empty() && arg.type_list_attr().empty()) + type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations); } return type_annotations; } // Generate TypeVars using attrs -void GenEagerPythonOp::GenerateTypeVars(std::unordered_map& type_annotations) { +void GenEagerPythonOp::GenerateTypeVars(const std::unordered_map& type_annotations) { bool added_typevar = false; for (const auto& attr : op_def_.attr()) { if (attr.type() == "type") { @@ -466,12 +464,10 @@ void GenEagerPythonOp::GenerateTypeVars(std::unordered_map& type for (int t : attr.allowed_values().list().type()) { DataType dtype = static_cast(t); const string py_dtype = python_op_gen_internal::DataTypeToPython(dtype, "_dtypes."); - if (dtype_type.find(py_dtype) != dtype_type.end()) { - allowed_types.emplace_back(dtype_type[py_dtype]); - } + allowed_types.emplace_back(dtype_type.at(py_dtype)); } - // If all dtypes are allowed, add them all + // When a Tensor does not have any dtypes specified, all dtypes are allowed if (allowed_types.empty()) { for (std::pair map_dtype : dtype_type) { allowed_types.emplace_back(map_dtype.second); @@ -486,7 +482,7 @@ void GenEagerPythonOp::GenerateTypeVars(std::unordered_map& type strings::StrAppend(&typevar_dtypes, *it); } - const string type_var_name = type_annotations[attr.name()]; + const string type_var_name = type_annotations.at(attr.name()); strings::StrAppend(&result_, type_var_name, " = TypeVar(\"", type_var_name, "\", ", typevar_dtypes,")\n"); added_typevar = true; } @@ -495,14 +491,15 @@ void GenEagerPythonOp::GenerateTypeVars(std::unordered_map& type if (added_typevar) strings::StrAppend(&result_, "\n"); } -// TODO(rahulkamat): Modify AddDefLine() to add return type annotation -void GenEagerPythonOp::AddReturnTypeAnnotation(std::unordered_map& type_annotations) { +void GenEagerPythonOp::AddReturnTypeAnnotation(const std::unordered_map& type_annotations) { if (op_def_.output_arg_size() == 1) { const auto& arg = op_def_.output_arg(0); - // Add type annotations to param - if (type_annotations.find(arg.name()) != type_annotations.end()) { + if (arg.number_attr().empty() && arg.type_list_attr().empty()) { + const string return_type = type_annotations.at(arg.name()); + // TODO(rahulkamat): Modify AddDefLine() to add return type annotation to avoid + // erasing ":\n" from the end of the def line result_.erase(result_.length() - 2); - strings::StrAppend(&result_, " -> ", type_annotations[arg.name()], ":\n"); + strings::StrAppend(&result_, " -> ", return_type, ":\n"); } } } @@ -829,8 +826,9 @@ void GenEagerPythonOp::AddEagerFunctionTeardown( bool GenEagerPythonOp::AddEagerFastPathAndGraphCode( const string& parameters, const std::vector& output_sizes, - const string& eager_not_allowed_error, std::unordered_map& type_annotations) { - if (type_annotate_op_) { + const string& eager_not_allowed_error, + const std::unordered_map& type_annotations) { + if (add_type_annotations_) { GenerateTypeVars(type_annotations); } if (api_def_.visibility() == ApiDef::VISIBLE) { @@ -839,7 +837,7 @@ bool GenEagerPythonOp::AddEagerFastPathAndGraphCode( AddExport(); AddDefLine(function_name_, parameters); - if (type_annotate_op_) { + if (add_type_annotations_) { AddReturnTypeAnnotation(type_annotations); } AddDocStringDescription(); @@ -877,11 
+875,11 @@ bool GenEagerPythonOp::AddEagerFastPathAndGraphCode( bool GenEagerPythonOp::AddEagerFallbackCode( const string& parameters, const std::vector& output_sizes, const string& num_outputs_expr, const string& eager_not_allowed_error, - std::unordered_map& type_annotations) { + const std::unordered_map& type_annotations) { AddDefLine( strings::StrCat(function_name_, kEagerFallbackSuffix), strings::StrCat(parameters, parameters.empty() ? "" : ", ", "ctx")); - if (type_annotate_op_) { + if (add_type_annotations_) { AddReturnTypeAnnotation(type_annotations); } if (!eager_not_allowed_error.empty()) { @@ -1133,7 +1131,7 @@ void GenEagerPythonOp::AddRawOpExport(const string& parameters) { string GetPythonOpsImpl(const OpList& ops, const ApiDefMap& api_defs, const std::vector& hidden_ops, const string& source_file_name = "", - std::unordered_set type_annotate_ops = {}) { + const std::unordered_set type_annotate_ops = {}) { string result; // Header // TODO(josh11b): Mention the library for which wrappers are being generated. @@ -1211,10 +1209,11 @@ from typing import TypeVar continue; } - const bool type_annotate_op = type_annotate_ops.find(op_def.name()) != type_annotate_ops.end(); + auto iter = type_annotate_ops.find(op_def.name()); + bool add_type_annotations = iter != type_annotate_ops.end(); strings::StrAppend(&result, - GetEagerPythonOp(op_def, *api_def, function_name, type_annotate_op)); + GetEagerPythonOp(op_def, *api_def, function_name, add_type_annotations)); } return result; @@ -1225,14 +1224,14 @@ from typing import TypeVar string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs, const std::vector& hidden_ops, const string& source_file_name, - std::unordered_set type_annotate_ops) { + const std::unordered_set type_annotate_ops) { return GetPythonOpsImpl(ops, api_defs, hidden_ops, source_file_name, type_annotate_ops); } void PrintPythonOps(const OpList& ops, const ApiDefMap& api_defs, const std::vector& hidden_ops, const string& source_file_name, - std::unordered_set type_annotate_ops) { + const std::unordered_set type_annotate_ops) { printf("%s", GetPythonOpsImpl(ops, api_defs, hidden_ops, source_file_name, type_annotate_ops).c_str()); } @@ -1245,16 +1244,14 @@ string GetPythonWrappers(const char* op_list_buf, size_t op_list_len) { return GetPythonOpsImpl(ops, api_def_map, {}); } -string GetArgAnnotation(const auto& arg, std::unordered_map& type_annotations) { - if (type_annotations.find(arg.type_attr()) != type_annotations.end()) { +string GetArgAnnotation(const auto& arg, const std::unordered_map& type_annotations) { + if (!arg.type_attr().empty()) { // Get the correct TypeVar if arg maps to an attr - return "_ops.Tensor[" + type_annotations[arg.type_attr()] + "]"; + return "_ops.Tensor[" + type_annotations.at(arg.type_attr()) + "]"; } else { // Get the dtype of the Tensor const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes."); - if (dtype_type.find(py_dtype) != dtype_type.end()) { - return "_ops.Tensor[" + dtype_type[py_dtype] + "]"; - } + return "_ops.Tensor[" + dtype_type.at(py_dtype) + "]"; } return "Any"; diff --git a/tensorflow/python/framework/python_op_gen.h b/tensorflow/python/framework/python_op_gen.h index 1a3b6c5e8f2..5dfc959b3ad 100644 --- a/tensorflow/python/framework/python_op_gen.h +++ b/tensorflow/python/framework/python_op_gen.h @@ -33,7 +33,7 @@ namespace tensorflow { string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs, const std::vector& hidden_ops, const string& source_file_name, - std::unordered_set 
type_annotate_ops); + const std::unordered_set type_annotate_ops); // Prints the output of GetPrintOps to stdout. // hidden_ops should be a list of Op names that should get a leading _ @@ -43,7 +43,7 @@ string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs, void PrintPythonOps(const OpList& ops, const ApiDefMap& api_defs, const std::vector& hidden_ops, const string& source_file_name, - std::unordered_set type_annotate_ops); + const std::unordered_set type_annotate_ops); // Get the python wrappers for a list of ops in a OpList. // `op_list_buf` should be a pointer to a buffer containing @@ -55,7 +55,7 @@ string GetPythonWrappers(const char* op_list_buf, size_t op_list_len); // `arg` should be an input or output of an op // `type_annotations` should contain attr names mapped to TypeVar names string GetArgAnnotation(const auto& arg, - std::unordered_map& type_annotations); + const std::unordered_map& type_annotations); } // namespace tensorflow diff --git a/tensorflow/python/framework/python_op_gen_internal.cc b/tensorflow/python/framework/python_op_gen_internal.cc index d0ef82857c4..adbdbbf06fb 100644 --- a/tensorflow/python/framework/python_op_gen_internal.cc +++ b/tensorflow/python/framework/python_op_gen_internal.cc @@ -513,11 +513,11 @@ const ApiDef::Attr* FindAttr(StringPiece name, const ApiDef& api_def) { } GenPythonOp::GenPythonOp(const OpDef& op_def, const ApiDef& api_def, - const string& function_name, const bool type_annotate_op) + const string& function_name, bool add_type_annotations) : op_def_(op_def), api_def_(api_def), function_name_(function_name), - type_annotate_op_(type_annotate_op), + add_type_annotations_(add_type_annotations), num_outs_(op_def.output_arg_size()) {} GenPythonOp::~GenPythonOp() {} diff --git a/tensorflow/python/framework/python_op_gen_internal.h b/tensorflow/python/framework/python_op_gen_internal.h index 5229bffc5d0..08d9b3c8a66 100644 --- a/tensorflow/python/framework/python_op_gen_internal.h +++ b/tensorflow/python/framework/python_op_gen_internal.h @@ -71,7 +71,7 @@ class ParamNames { class GenPythonOp { public: GenPythonOp(const OpDef& op_def, const ApiDef& api_def, - const string& function_name, const bool type_annotate_op_); + const string& function_name, bool add_type_annotations_); virtual ~GenPythonOp(); virtual string Code(); @@ -98,7 +98,7 @@ class GenPythonOp { const OpDef& op_def_; const ApiDef& api_def_; const string function_name_; - const bool type_annotate_op_; + bool add_type_annotations_; const int num_outs_; // Return value from Code() is prelude_ + result_. 
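
Note: with this patch in place, each whitelisted op gets one TypeVar per "type" attr, emitted directly above its wrapper. A rough sketch of that prologue for the synthetic test op Foo, taken from the expectations in python_op_gen_test.cc (`_dtypes` is the generated module's alias for tensorflow.python.framework.dtypes):

    # Sketch of the TypeVar lines the generator emits for op Foo
    from typing import TypeVar

    TV_Foo_T = TypeVar("TV_Foo_T", _dtypes.Int8, _dtypes.UInt8)
    TV_Foo_T2 = TypeVar("TV_Foo_T2", _dtypes.Float32, _dtypes.Float64, _dtypes.String)
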
diff --git a/tensorflow/python/framework/python_op_gen_main.cc b/tensorflow/python/framework/python_op_gen_main.cc index dcaea53100e..c3ef4202d2a 100644 --- a/tensorflow/python/framework/python_op_gen_main.cc +++ b/tensorflow/python/framework/python_op_gen_main.cc @@ -109,7 +109,7 @@ void PrintAllPythonOps(const std::vector& op_list, const std::vector& api_def_dirs, const string& source_file_name, bool op_list_is_whitelist, - std::unordered_set type_annotate_ops) { + const std::unordered_set type_annotate_ops) { OpList ops; OpRegistry::Global()->Export(false, &ops); @@ -159,7 +159,7 @@ int main(int argc, char* argv[]) { argv[1], ",", tensorflow::str_util::SkipEmpty()); // Add op name to this set to add type annotations - std::unordered_set type_annotate_ops { + const std::unordered_set type_annotate_ops { }; if (argc == 2) { diff --git a/tensorflow/python/framework/python_op_gen_test.cc b/tensorflow/python/framework/python_op_gen_test.cc index cf6566ea7ae..5fff1a1d111 100644 --- a/tensorflow/python/framework/python_op_gen_test.cc +++ b/tensorflow/python/framework/python_op_gen_test.cc @@ -261,7 +261,7 @@ TEST(PythonOpGen, TypeAnnotateDefaultParams) { string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); - const string params = "def foo_bar(x: _ops.Tensor[_dtypes.Float32], t: TV_FooBar_t, var1: bool = False, var2: int = 0, name=None)"; + const string params = "def foo_bar(x: _ops.Tensor[_dtypes.Float32], t: TV_FooBar_t, var1:bool=False, var2:int=0, name=None)"; const string params_fallback = "def foo_bar_eager_fallback(x: _ops.Tensor[_dtypes.Float32], t: TV_FooBar_t, var1: bool, var2: int, name, ctx)"; ExpectHasSubstr(code, params); ExpectHasSubstr(code, params_fallback); From b00c93e3c13db45b8d19121598f9a0b6eaf0b93f Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Wed, 24 Jun 2020 00:37:27 +0000 Subject: [PATCH 15/19] Add test with all ops annotated, Move proto strings inside tests --- tensorflow/python/framework/python_op_gen.cc | 2 + .../python/framework/python_op_gen_test.cc | 442 ++++++++++++------ 2 files changed, 311 insertions(+), 133 deletions(-) diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc index b6e39a4df00..19946f5b71c 100644 --- a/tensorflow/python/framework/python_op_gen.cc +++ b/tensorflow/python/framework/python_op_gen.cc @@ -434,6 +434,8 @@ std::unordered_map GenEagerPythonOp::GetTypeAnnotations() { } else if (attr.type() == "bool" || attr.type() == "float" || attr.type() == "int" || attr.type() == "bytes") { type_annotations[attr.name()] = attr.type(); + } else if (attr.type() == "string") { + type_annotations[attr.name()] = "str"; } } diff --git a/tensorflow/python/framework/python_op_gen_test.cc b/tensorflow/python/framework/python_op_gen_test.cc index 5fff1a1d111..41e27f019cf 100644 --- a/tensorflow/python/framework/python_op_gen_test.cc +++ b/tensorflow/python/framework/python_op_gen_test.cc @@ -23,135 +23,6 @@ limitations under the License. namespace tensorflow { namespace { -constexpr char kBaseOpDef[] = R"( -op { - name: "Foo" - input_arg { - name: "x" - type_attr: "T" - } - input_arg { - name: "y" - type_attr: "T2" - } - output_arg { - name: "output" - type_attr: "T" - } - attr { - name: "T" - type: "type" - allowed_values { - list { - type: DT_UINT8 - type: DT_INT8 - } - } - } - attr { - name: "T2" - type: "type" - allowed_values { - list { - type: DT_STRING - type: DT_FLOAT - type: DT_DOUBLE - } - } - } - summary: "Summary for op Foo." - description: "Description for op Foo." 
-} -op { - name: "Bar" - input_arg { - name: "x" - type: DT_STRING - } - input_arg { - name: "y" - type: DT_QINT8 - } - output_arg { - name: "output" - type: DT_BOOL - } - summary: "Summary for op Bar." - description: "Description for op Bar." -} -op { - name: "FooBar" - input_arg { - name: "x" - type: DT_FLOAT - } - output_arg { - name: "output" - type: DT_BOOL - } - attr { - name: "t" - type: "type" - allowed_values { - list { - type: DT_HALF - type: DT_INT8 - } - } - } - attr { - name: "var1" - type: "bool" - default_value { - b: false - } - } - attr { - name: "var2" - type: "int" - default_value { - i: 0 - } - } - summary: "Summary for op FooBar." - description: "Description for op FooBar." -} -op { - name: "Baz" - input_arg { - name: "inputs" - number_attr: "N" - type_list_attr: "T" - } - output_arg { - name: "output1" - type: DT_BOOL - } - output_arg { - name: "output2" - type: DT_BOOL - } - attr { - name: "T" - type: "bool" - } - attr { - name: "N" - type: "int" - } - summary: "Summary for op Baz." - description: "Description for op Baz." -} -)"; - -std::unordered_set type_annotate_ops { - "Foo", - "Bar", - "FooBar", - "Baz" -}; - - void ExpectHasSubstr(const string& s, const string& expected) { EXPECT_TRUE(absl::StrContains(s, expected)) << "'Generated ops does not contain '" << expected << "'"; @@ -172,19 +43,67 @@ void ExpectSubstrOrder(const string& s, const string& before, << before << "' is not before '" << after; } -TEST(PythonOpGen, Basic) { +TEST(PythonOpGen, TypeAnnotateAllOps) { OpList ops; OpRegistry::Global()->Export(false, &ops); ApiDefMap api_def_map(ops); - string code = GetPythonOps(ops, api_def_map, {}, "", {}); + std::unordered_set type_annotate_ops; + for (const auto& op : ops.op()) { + type_annotate_ops.insert(op.name()); + } - EXPECT_TRUE(absl::StrContains(code, "def case")); - // TODO(mdan): Add tests to verify type annotations are correctly added. 
+  string code = GetPythonOps(ops, api_def_map, {}, "", type_annotate_ops);
+
+  const string all_types = ", _dtypes.BFloat16, _dtypes.Bool, _dtypes.Complex128, _dtypes.Complex64, "
+      "_dtypes.Float16, _dtypes.Float32, _dtypes.Float64, _dtypes.Half, _dtypes.Int16, "
+      "_dtypes.Int32, _dtypes.Int64, _dtypes.Int8, _dtypes.QInt16, _dtypes.QInt32, "
+      "_dtypes.QInt8, _dtypes.QUInt16, _dtypes.QUInt8, _dtypes.Resource, _dtypes.String, "
+      "_dtypes.UInt16, _dtypes.UInt32, _dtypes.UInt64, _dtypes.UInt8, _dtypes.Variant)";
+
+  const string fake_param_typevar = "TV_FakeParam_dtype = TypeVar(\"TV_FakeParam_dtype\"" + all_types;
+  const string fake_param = "def fake_param(dtype: TV_FakeParam_dtype, shape, name=None) -> _ops.Tensor[TV_FakeParam_dtype]:";
+  const string fake_param_fallback = "def fake_param_eager_fallback(dtype: TV_FakeParam_dtype, shape, name, ctx) -> _ops.Tensor[TV_FakeParam_dtype]:";
+
+  ExpectHasSubstr(code, fake_param_typevar);
+  ExpectHasSubstr(code, fake_param);
+  ExpectHasSubstr(code, fake_param_fallback);
+
+  const string to_bool_typevar = "TV_ToBool_T = TypeVar(\"TV_ToBool_T\"" + all_types;
+  const string to_bool_ = "def to_bool(input: _ops.Tensor[TV_ToBool_T], name=None) -> _ops.Tensor[_dtypes.Bool]:";
+  const string to_bool_fallback = "def to_bool_eager_fallback(input: _ops.Tensor[TV_ToBool_T], name, ctx) -> _ops.Tensor[_dtypes.Bool]:";
+
+  ExpectHasSubstr(code, to_bool_typevar);
+  ExpectHasSubstr(code, to_bool_);
+  ExpectHasSubstr(code, to_bool_fallback);
 }
 
 TEST(PythonOpGen, TypeAnnotateSingleTypeTensor) {
+  constexpr char kBaseOpDef[] = R"(
+    op {
+      name: "Bar"
+      input_arg {
+        name: "x"
+        type: DT_STRING
+      }
+      input_arg {
+        name: "y"
+        type: DT_QINT8
+      }
+      output_arg {
+        name: "output"
+        type: DT_BOOL
+      }
+      summary: "Summary for op Bar."
+      description: "Description for op Bar."
+    }
+  )";
+
+  std::unordered_set<string> type_annotate_ops {
+    "Bar"
+  };
+
   OpList op_defs;
   OpRegistry::Global()->Export(false, &op_defs);
   protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);  // NOLINT
   ApiDefMap api_def_map(op_defs);
@@ -200,6 +119,51 @@ TEST(PythonOpGen, TypeAnnotateSingleTypeTensor) {
 }
 
 TEST(PythonOpGen, TypeAnnotateMultiTypeTensor) {
+  constexpr char kBaseOpDef[] = R"(
+    op {
+      name: "Foo"
+      input_arg {
+        name: "x"
+        type_attr: "T"
+      }
+      input_arg {
+        name: "y"
+        type_attr: "T2"
+      }
+      output_arg {
+        name: "output"
+        type_attr: "T"
+      }
+      attr {
+        name: "T"
+        type: "type"
+        allowed_values {
+          list {
+            type: DT_UINT8
+            type: DT_INT8
+          }
+        }
+      }
+      attr {
+        name: "T2"
+        type: "type"
+        allowed_values {
+          list {
+            type: DT_STRING
+            type: DT_FLOAT
+            type: DT_DOUBLE
+          }
+        }
+      }
+      summary: "Summary for op Foo."
+      description: "Description for op Foo."
+ } + )"; + + std::unordered_set type_annotate_ops { + "Foo", + }; + OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT @@ -228,6 +237,51 @@ TV_Foo_T2 = TypeVar("TV_Foo_T2", _dtypes.Float32, _dtypes.Float64, _dtypes.Strin } TEST(PythonOpGen, TypeAnnotateFallback) { + constexpr char kBaseOpDef[] = R"( + op { + name: "Foo" + input_arg { + name: "x" + type_attr: "T" + } + input_arg { + name: "y" + type_attr: "T2" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_UINT8 + type: DT_INT8 + } + } + } + attr { + name: "T2" + type: "type" + allowed_values { + list { + type: DT_STRING + type: DT_FLOAT + type: DT_DOUBLE + } + } + } + summary: "Summary for op Foo." + description: "Description for op Foo." + } + )"; + + std::unordered_set type_annotate_ops { + "Foo", + }; + OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT @@ -240,6 +294,51 @@ TEST(PythonOpGen, TypeAnnotateFallback) { } TEST(PythonOpGen, GenerateTypeVarAboveOp) { + constexpr char kBaseOpDef[] = R"( + op { + name: "Foo" + input_arg { + name: "x" + type_attr: "T" + } + input_arg { + name: "y" + type_attr: "T2" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_UINT8 + type: DT_INT8 + } + } + } + attr { + name: "T2" + type: "type" + allowed_values { + list { + type: DT_STRING + type: DT_FLOAT + type: DT_DOUBLE + } + } + } + summary: "Summary for op Foo." + description: "Description for op Foo." + } + )"; + + std::unordered_set type_annotate_ops { + "Foo", + }; + OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT @@ -254,6 +353,50 @@ TEST(PythonOpGen, GenerateTypeVarAboveOp) { TEST(PythonOpGen, TypeAnnotateDefaultParams) { + constexpr char kBaseOpDef[] = R"( + op { + name: "FooBar" + input_arg { + name: "x" + type: DT_FLOAT + } + output_arg { + name: "output" + type: DT_BOOL + } + attr { + name: "t" + type: "type" + allowed_values { + list { + type: DT_HALF + type: DT_INT8 + } + } + } + attr { + name: "var1" + type: "bool" + default_value { + b: false + } + } + attr { + name: "var2" + type: "int" + default_value { + i: 0 + } + } + summary: "Summary for op FooBar." + description: "Description for op FooBar." + } + )"; + + std::unordered_set type_annotate_ops { + "FooBar", + }; + OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT @@ -268,6 +411,39 @@ TEST(PythonOpGen, TypeAnnotateDefaultParams) { } TEST(PythonOpGen, NoTypingSequenceTensors) { + constexpr char kBaseOpDef[] = R"( + op { + name: "Baz" + input_arg { + name: "inputs" + number_attr: "N" + type_list_attr: "T" + } + output_arg { + name: "output1" + type: DT_BOOL + } + output_arg { + name: "output2" + type: DT_BOOL + } + attr { + name: "T" + type: "bool" + } + attr { + name: "N" + type: "int" + } + summary: "Summary for op Baz." + description: "Description for op Baz." 
+ } + )"; + + std::unordered_set type_annotate_ops { + "Baz" + }; + OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT From 61ef65bae9e0b9860941cd7fb86c14babaef1f8c Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Wed, 24 Jun 2020 17:14:05 +0000 Subject: [PATCH 16/19] Update comment describing dtype map --- tensorflow/python/framework/python_op_gen.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc index 19946f5b71c..de5618103d3 100644 --- a/tensorflow/python/framework/python_op_gen.cc +++ b/tensorflow/python/framework/python_op_gen.cc @@ -45,7 +45,7 @@ const int kRightMargin = 78; constexpr char kEagerFallbackSuffix[] = "_eager_fallback"; -// Dtype enums mapped to dtype classes which is the type of each dtype +// Maps C++ dtype enum values to Python DType classes const std::unordered_map dtype_type { {"_dtypes.float16", "_dtypes.Float16"}, {"_dtypes.half", "_dtypes.Half"}, From eff1aa2b458aa3f7ea5c210914ba429abd28bcb5 Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Wed, 24 Jun 2020 18:29:50 +0000 Subject: [PATCH 17/19] Update comments --- .../python/framework/python_op_gen_main.cc | 2 +- .../python/framework/python_op_gen_test.cc | 16 +++++++--------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/tensorflow/python/framework/python_op_gen_main.cc b/tensorflow/python/framework/python_op_gen_main.cc index c3ef4202d2a..cd7c2d53097 100644 --- a/tensorflow/python/framework/python_op_gen_main.cc +++ b/tensorflow/python/framework/python_op_gen_main.cc @@ -158,7 +158,7 @@ int main(int argc, char* argv[]) { std::vector api_def_dirs = tensorflow::str_util::Split( argv[1], ",", tensorflow::str_util::SkipEmpty()); - // Add op name to this set to add type annotations + // Add op name here to generate type annotations for it const std::unordered_set type_annotate_ops { }; diff --git a/tensorflow/python/framework/python_op_gen_test.cc b/tensorflow/python/framework/python_op_gen_test.cc index 41e27f019cf..dc2ba4597c9 100644 --- a/tensorflow/python/framework/python_op_gen_test.cc +++ b/tensorflow/python/framework/python_op_gen_test.cc @@ -106,7 +106,7 @@ TEST(PythonOpGen, TypeAnnotateSingleTypeTensor) { OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); - protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT + protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); @@ -166,7 +166,7 @@ TEST(PythonOpGen, TypeAnnotateMultiTypeTensor) { OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); - protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT + protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); @@ -223,7 +223,7 @@ TEST(PythonOpGen, GenerateCorrectTypeVars) { OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); - protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT + protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); @@ -284,7 +284,7 @@ TEST(PythonOpGen, TypeAnnotateFallback) { OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); - 
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT + protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); @@ -341,7 +341,7 @@ TEST(PythonOpGen, GenerateTypeVarAboveOp) { OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); - protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT + protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); @@ -399,7 +399,7 @@ TEST(PythonOpGen, TypeAnnotateDefaultParams) { OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); - protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT + protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); @@ -446,7 +446,7 @@ TEST(PythonOpGen, NoTypingSequenceTensors) { OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); - protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); // NOLINT + protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, {}, "", type_annotate_ops); @@ -456,7 +456,5 @@ TEST(PythonOpGen, NoTypingSequenceTensors) { ExpectHasSubstr(code, baz_def_line); } -// TODO(mdan): Include more tests with synhtetic ops and api defs. - } // namespace } // namespace tensorflow From 0089b010bf5fb3365bddb2bf0a40bec254cc9d3b Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Wed, 24 Jun 2020 19:45:36 +0000 Subject: [PATCH 18/19] Remove auto used as a function argument type --- tensorflow/python/framework/python_op_gen.cc | 12 ++++++------ tensorflow/python/framework/python_op_gen.h | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc index de5618103d3..473eae43cd2 100644 --- a/tensorflow/python/framework/python_op_gen.cc +++ b/tensorflow/python/framework/python_op_gen.cc @@ -443,7 +443,7 @@ std::unordered_map GenEagerPythonOp::GetTypeAnnotations() { for (const auto& arg : op_def_.input_arg()) { // TODO(rahulkamat): Add type annotations to args that accept a sequence of Tensors if (!arg.number_attr().empty() || !arg.type_list_attr().empty()) continue; - type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations); + type_annotations[arg.name()] = GetArgAnnotation(arg.type_attr(), arg.type(), type_annotations); } // TODO(rahulkamat): Add type annotations to handle return types of a sequence of Tensors. 
@@ -451,7 +451,7 @@ std::unordered_map GenEagerPythonOp::GetTypeAnnotations() { if (op_def_.output_arg_size() == 1) { const auto& arg = op_def_.output_arg(0); if (arg.number_attr().empty() && arg.type_list_attr().empty()) - type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations); + type_annotations[arg.name()] = GetArgAnnotation(arg.type_attr(), arg.type(), type_annotations); } return type_annotations; @@ -1246,13 +1246,13 @@ string GetPythonWrappers(const char* op_list_buf, size_t op_list_len) { return GetPythonOpsImpl(ops, api_def_map, {}); } -string GetArgAnnotation(const auto& arg, const std::unordered_map& type_annotations) { - if (!arg.type_attr().empty()) { +string GetArgAnnotation(const string& arg_type_attr, DataType arg_type, const std::unordered_map& type_annotations) { + if (!arg_type_attr.empty()) { // Get the correct TypeVar if arg maps to an attr - return "_ops.Tensor[" + type_annotations.at(arg.type_attr()) + "]"; + return "_ops.Tensor[" + type_annotations.at(arg_type_attr) + "]"; } else { // Get the dtype of the Tensor - const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes."); + const string py_dtype = python_op_gen_internal::DataTypeToPython(arg_type, "_dtypes."); return "_ops.Tensor[" + dtype_type.at(py_dtype) + "]"; } diff --git a/tensorflow/python/framework/python_op_gen.h b/tensorflow/python/framework/python_op_gen.h index 5dfc959b3ad..75f04952d48 100644 --- a/tensorflow/python/framework/python_op_gen.h +++ b/tensorflow/python/framework/python_op_gen.h @@ -54,7 +54,7 @@ string GetPythonWrappers(const char* op_list_buf, size_t op_list_len); // Get the type annotation for an arg // `arg` should be an input or output of an op // `type_annotations` should contain attr names mapped to TypeVar names -string GetArgAnnotation(const auto& arg, +string GetArgAnnotation(const string& arg_type_attr, DataType arg_type, const std::unordered_map& type_annotations); } // namespace tensorflow From eb500b17ac6178d5d411203b9771c720b7f117ac Mon Sep 17 00:00:00 2001 From: rahul-kamat Date: Wed, 24 Jun 2020 21:23:32 +0000 Subject: [PATCH 19/19] Pass single structured arg to function --- tensorflow/python/framework/python_op_gen.cc | 12 ++++++------ tensorflow/python/framework/python_op_gen.h | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc index 473eae43cd2..ecece1655ef 100644 --- a/tensorflow/python/framework/python_op_gen.cc +++ b/tensorflow/python/framework/python_op_gen.cc @@ -443,7 +443,7 @@ std::unordered_map GenEagerPythonOp::GetTypeAnnotations() { for (const auto& arg : op_def_.input_arg()) { // TODO(rahulkamat): Add type annotations to args that accept a sequence of Tensors if (!arg.number_attr().empty() || !arg.type_list_attr().empty()) continue; - type_annotations[arg.name()] = GetArgAnnotation(arg.type_attr(), arg.type(), type_annotations); + type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations); } // TODO(rahulkamat): Add type annotations to handle return types of a sequence of Tensors. 
@@ -451,7 +451,7 @@ std::unordered_map<string, string> GenEagerPythonOp::GetTypeAnnotations() {
   if (op_def_.output_arg_size() == 1) {
     const auto& arg = op_def_.output_arg(0);
     if (arg.number_attr().empty() && arg.type_list_attr().empty())
-      type_annotations[arg.name()] = GetArgAnnotation(arg.type_attr(), arg.type(), type_annotations);
+      type_annotations[arg.name()] = GetArgAnnotation(arg, type_annotations);
   }
 
   return type_annotations;
@@ -1246,13 +1246,13 @@ string GetPythonWrappers(const char* op_list_buf, size_t op_list_len) {
   return GetPythonOpsImpl(ops, api_def_map, {});
 }
 
-string GetArgAnnotation(const string& arg_type_attr, DataType arg_type, const std::unordered_map<string, string>& type_annotations) {
-  if (!arg_type_attr.empty()) {
+string GetArgAnnotation(const OpDef::ArgDef& arg, const std::unordered_map<string, string>& type_annotations) {
+  if (!arg.type_attr().empty()) {
     // Get the correct TypeVar if arg maps to an attr
-    return "_ops.Tensor[" + type_annotations.at(arg_type_attr) + "]";
+    return "_ops.Tensor[" + type_annotations.at(arg.type_attr()) + "]";
   } else {
     // Get the dtype of the Tensor
-    const string py_dtype = python_op_gen_internal::DataTypeToPython(arg_type, "_dtypes.");
+    const string py_dtype = python_op_gen_internal::DataTypeToPython(arg.type(), "_dtypes.");
     return "_ops.Tensor[" + dtype_type.at(py_dtype) + "]";
   }
 
diff --git a/tensorflow/python/framework/python_op_gen.h b/tensorflow/python/framework/python_op_gen.h
index 75f04952d48..178e078a81b 100644
--- a/tensorflow/python/framework/python_op_gen.h
+++ b/tensorflow/python/framework/python_op_gen.h
@@ -54,7 +54,7 @@ string GetPythonWrappers(const char* op_list_buf, size_t op_list_len);
 // Get the type annotation for an arg
 // `arg` should be an input or output of an op
 // `type_annotations` should contain attr names mapped to TypeVar names
-string GetArgAnnotation(const string& arg_type_attr, DataType arg_type,
+string GetArgAnnotation(const OpDef::ArgDef& arg,
                         const std::unordered_map<string, string>& type_annotations);
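
Note: putting the series together, the wrappers generated for the whitelisted test op Foo end up with roughly the following shape, assembled from the test expectations above. Bodies are elided; `_ops` and `_dtypes` are the generated module's aliases for tensorflow.python.framework.ops and .dtypes, and subscripting `_ops.Tensor` here assumes the Tensor class is made subscriptable for annotation purposes:

    # Sketch of the generated, annotated wrappers for op Foo
    from typing import TypeVar

    # One TypeVar per "type" attr, constrained to the attr's allowed dtypes
    TV_Foo_T = TypeVar("TV_Foo_T", _dtypes.Int8, _dtypes.UInt8)
    TV_Foo_T2 = TypeVar("TV_Foo_T2", _dtypes.Float32, _dtypes.Float64, _dtypes.String)

    # Inputs bound to attr "T" share the TypeVar with the output annotation
    def foo(x: _ops.Tensor[TV_Foo_T], y: _ops.Tensor[TV_Foo_T2], name=None) -> _ops.Tensor[TV_Foo_T]:
      ...

    # The eager fallback gets the same annotations, plus the ctx parameter
    def foo_eager_fallback(x: _ops.Tensor[TV_Foo_T], y: _ops.Tensor[TV_Foo_T2], name, ctx) -> _ops.Tensor[TV_Foo_T]:
      ...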