diff --git a/.bazelrc b/.bazelrc index 928c70b9b70..bdfd910d431 100644 --- a/.bazelrc +++ b/.bazelrc @@ -396,17 +396,21 @@ build:rbe_linux_cuda_nvcc_py36 --config=rbe_linux_cuda_nvcc_base --repo_env=TF_P build:rbe_linux_cuda_nvcc_py37 --config=rbe_linux_cuda_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.7" build:rbe_linux_cuda_nvcc_py38 --config=rbe_linux_cuda_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-gcc7_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.8" -build:rbe_linux_cuda_clang --config=rbe_linux_cuda_base -build:rbe_linux_cuda_clang --crosstool_top="@ubuntu16.04-py3-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda//crosstool:toolchain" -build:rbe_linux_cuda_clang --extra_toolchains="@ubuntu16.04-py3-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda//crosstool:toolchain-linux-x86_64" -build:rbe_linux_cuda_clang --extra_execution_platforms="@ubuntu16.04-py3-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" -build:rbe_linux_cuda_clang --host_platform="@ubuntu16.04-py3-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" -build:rbe_linux_cuda_clang --platforms="@ubuntu16.04-py3-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" -build:rbe_linux_cuda_clang --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu16.04-py3-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda" -build:rbe_linux_cuda_clang --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu16.04-py3-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_tensorrt" -build:rbe_linux_cuda_clang --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu16.04-py3-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_nccl" -build:rbe_linux_cuda_clang --define=using_cuda_clang=true -test:rbe_linux_cuda_clang --config=rbe_linux_cuda_base +build:rbe_linux_cuda_clang_base --config=rbe_linux_cuda_base +build:rbe_linux_cuda_clang_base 
--crosstool_top="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda//crosstool:toolchain" +build:rbe_linux_cuda_clang_base --extra_toolchains="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda//crosstool:toolchain-linux-x86_64" +build:rbe_linux_cuda_clang_base --extra_execution_platforms="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" +build:rbe_linux_cuda_clang_base --host_platform="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" +build:rbe_linux_cuda_clang_base --platforms="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_platform//:platform" +build:rbe_linux_cuda_clang_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_cuda" +build:rbe_linux_cuda_clang_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_tensorrt" +build:rbe_linux_cuda_clang_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_nccl" +build:rbe_linux_cuda_clang_base --define=using_cuda_clang=true +build:rbe_linux_cuda_clang_py27 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python2.7" +build:rbe_linux_cuda_clang_py35 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.5" +build:rbe_linux_cuda_clang_py36 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.6" +build:rbe_linux_cuda_clang_py37 --config=rbe_linux_cuda_clang_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.7" +build:rbe_linux_cuda_clang_py38 --config=rbe_linux_cuda_clang_base 
--repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0_config_python3.8" common:rbe_gpu_linux --config=rbe_linux_cuda_nvcc diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc index 0a64f3c91a5..540efe9dcc0 100644 --- a/tensorflow/c/eager/c_api.cc +++ b/tensorflow/c/eager/c_api.cc @@ -747,9 +747,7 @@ TF_DeviceList* TFE_ContextListDevices(TFE_Context* ctx, TF_Status* status) { } void TFE_ContextClearCaches(TFE_Context* ctx) { - tensorflow::EagerContext* context = - tensorflow::ContextFromInterface(tensorflow::unwrap(ctx)); - context->ClearCachesAndThreadExecutors(); + tensorflow::unwrap(ctx)->ClearCachesAndThreadExecutors(); } // Set server_def on the context, possibly updating it. diff --git a/tensorflow/c/eager/context_interface.h b/tensorflow/c/eager/context_interface.h index 1587df8865d..be0aad31a35 100644 --- a/tensorflow/c/eager/context_interface.h +++ b/tensorflow/c/eager/context_interface.h @@ -79,6 +79,8 @@ class AbstractContextInterface { // List attributes of available devices virtual void ListDevices(std::vector* devices) = 0; + virtual void ClearCachesAndThreadExecutors() = 0; + protected: virtual ~AbstractContextInterface() {} }; diff --git a/tensorflow/compiler/mlir/lite/flatbuffer_export.cc b/tensorflow/compiler/mlir/lite/flatbuffer_export.cc index 94db5400fba..e9192388070 100644 --- a/tensorflow/compiler/mlir/lite/flatbuffer_export.cc +++ b/tensorflow/compiler/mlir/lite/flatbuffer_export.cc @@ -1017,10 +1017,10 @@ Optional> Translator::BuildOperator( inst->getName().print(os); // Print out attributes except for large elementsattributes (which should // rarely be the cause why the legalization didn't happen). - if (!inst->getAttrList().getAttrs().empty()) { + if (!inst->getMutableAttrDict().getAttrs().empty()) { os << " {"; bool first = true; - for (auto& named_attr : inst->getAttrList().getDictionary()) { + for (auto& named_attr : inst->getMutableAttrDict().getDictionary()) { os << (!first ? 
", " : ""); first = false; named_attr.first.print(os); diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td index e2aec40d603..d0c15f7e9ec 100644 --- a/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td +++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td @@ -48,10 +48,14 @@ class TfDevice_Op traits = []> : Op { } def TfDevice_LaunchOp : TfDevice_Op<"launch", - [SingleBlockImplicitTerminator<"ReturnOp">]> -{ - let summary = [{The `tf_device.launch` op captures all needed live-in values - and launches containing operations on target device.}]; + [SingleBlockImplicitTerminator<"ReturnOp">]> { + let summary = [{ +The `tf_device.launch` op launches containing operations on target device. + }]; + + let description = [{ +This op captures all needed live-in values. + }]; let arguments = (ins StrAttr:$device @@ -85,8 +89,8 @@ def TfDevice_LaunchOp : TfDevice_Op<"launch", def TfDevice_ReturnOp : TfDevice_Op<"return", [Terminator]> { let summary = [{ - The `tf_device.return` operation terminates and returns values from - `tf_device.launch` operation; +The `tf_device.return` operation terminates and returns values from a +`tf_device` dialect operation. }]; let arguments = (ins @@ -121,7 +125,6 @@ def TfDevice_LaunchFuncOp : TfDevice_Op<"launch_func", []> { let extraClassDeclaration = [{ StringRef getFunc() { return func(); } StringRef getDevice() { return device(); } - FunctionType getFuncType(); }]; } @@ -281,4 +284,51 @@ For example: let verifier = [{ return Verify(*this); }]; } +def TfDevice_ClusterOp : TfDevice_Op<"cluster", + [SingleBlockImplicitTerminator<"ReturnOp">]> { + let summary = [{ +The `tf_device.cluster` op wraps containing operations in a region. + }]; + + let description = [{ +This op can be used to group operations, and captures all needed live-in values. 
+ }]; + + let arguments = (ins); + + let results = (outs + Variadic:$results + ); + + let regions = (region SizedRegion<1>:$body); + + let extraClassDeclaration = [{ + Block &GetBody() { return getOperation()->getRegion(0).front(); } + }]; +} + +def TfDevice_ClusterFuncOp : TfDevice_Op<"cluster_func", []> { + let summary = [{ +The `tf_device.cluster_func` launches a function containing the body of a +cluster. + }]; + + let description = [{ +This op is used for outlining a cluster. + }]; + + let arguments = (ins + FlatSymbolRefAttr:$func, + Variadic:$operands + ); + + let results = (outs + Variadic:$results + ); + + let extraClassDeclaration = [{ + StringRef getFunc() { return func(); } + }]; +} + #endif // TF_DEVICE_DIALECT diff --git a/tensorflow/compiler/mlir/tensorflow/tests/tpu_extract_outside_compilation.mlir b/tensorflow/compiler/mlir/tensorflow/tests/tpu_extract_outside_compilation.mlir index b0b46b4ff5d..b2e8f116827 100644 --- a/tensorflow/compiler/mlir/tensorflow/tests/tpu_extract_outside_compilation.mlir +++ b/tensorflow/compiler/mlir/tensorflow/tests/tpu_extract_outside_compilation.mlir @@ -91,3 +91,55 @@ func @nodep_multiple_outside_compilation() -> () { }) {device = "tpu0", launch_attr = "launch_attr"} : () -> () return } + +// Tests extraction of a single outside compiled cluster with single TPU cluster return. 
+ +// CHECK-LABEL: func @single_tpu_return_single_outside_compilation +func @single_tpu_return_single_outside_compilation(%arg0: tensor) -> tensor { + %0 = "tf.A"(%arg0) : (tensor) -> tensor + // CHECK: %[[REPLICATE:[0-9]*]]:2 = tf_device.replicate + // CHECK: %[[PARALLEL_EXECUTE_OUTPUT:[0-9]*]] = "tf_device.parallel_execute" + // CHECK-NEXT: "tf_device.launch" + // CHECK: %[[TPU_LAUNCH_OUTPUT:[0-9]*]] = "tf_device.launch" + // CHECK: tf_device.return + // CHECK: tf_device.return %[[TPU_LAUNCH_OUTPUT]] + // CHECK: tf_device.return %[[PARALLEL_EXECUTE_OUTPUT]] + %1:2 = tf_device.replicate([%0, %arg0] as %ri_0: tensor) {n = 2 : i32} { + %2 = "tf_device.launch"() ( { + "tf.A"() : () -> () + "tf.B"() {_xla_outside_compilation = "cluster1"} : () -> () + %3 = "tf.C"() : () -> tensor + tf_device.return %3 : tensor + }) {device = "tpu0", launch_attr = "launch_attr"} : () -> tensor + tf_device.return %2 : tensor + } + + return %1 : tensor +} + +// Tests extraction of a single outside compiled cluster with multiple TPU cluster return. 
+ +// CHECK-LABEL: func @multiple_tpu_return_single_outside_compilation +func @multiple_tpu_return_single_outside_compilation(%arg0: tensor) -> tensor { + %0 = "tf.A"(%arg0) : (tensor) -> tensor + // CHECK: %[[REPLICATE:[0-9]*]]:4 = tf_device.replicate + // CHECK: %[[PARALLEL_EXECUTE_OUTPUT:[0-9]*]]:2 = "tf_device.parallel_execute" + // CHECK-NEXT: "tf_device.launch" + // CHECK: %[[TPU_LAUNCH_OUTPUT:[0-9]*]]:2 = "tf_device.launch" + // CHECK: tf_device.return + // CHECK: tf_device.return %[[TPU_LAUNCH_OUTPUT]] + // CHECK: tf_device.return %[[PARALLEL_EXECUTE_OUTPUT]] + %1:4 = tf_device.replicate([%0, %arg0] as %ri_0: tensor) {n = 2 : i32} { + %2, %3 = "tf_device.launch"() ( { + %4 = "tf.A"() : () -> tensor + "tf.B"() {_xla_outside_compilation = "cluster1"} : () -> () + %5 = "tf.C"() : () -> tensor + tf_device.return %4, %5 : tensor, tensor + }) {device = "tpu0", launch_attr = "launch_attr"} : () -> (tensor, tensor) + tf_device.return %2, %3 : tensor, tensor + } + + return %1 : tensor +} + +// TODO(b/154363171): Add test cases for when output of outside compilation is returned by parallel_execute. diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_formation.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_formation.cc index 860d537c7ef..0571701413a 100644 --- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_formation.cc +++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_formation.cc @@ -65,7 +65,8 @@ constexpr char kBadTPUReplicateAttrMsg[] = "requires '_tpu_replicate' string attribute"; // Mapping for `_tpu_replicate` attribute to TPUReplicateMetadata attributes. -using MetadataMap = llvm::SmallDenseMap; +using MetadataMap = + llvm::SmallDenseMap; // Mapping for `_tpu_replicate` attribute to ops of a cluster. 
using ClusterMap = llvm::SmallDenseMapwalk([&](TF::TPUReplicateMetadataOp metadata_op) -> WalkResult { - NamedAttributeList attrs = metadata_op.getAttrs(); + MutableDictionaryAttr attrs = metadata_op.getAttrs(); // Missing or bad `_tpu_replicate` attribute. auto tpu_replicate_attr = attrs.get(kTPUReplicateAttr); diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_outside_compilation.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_outside_compilation.cc index 34d0b98c962..4e20cd9d64b 100644 --- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_outside_compilation.cc +++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_outside_compilation.cc @@ -103,6 +103,18 @@ tf_device::LaunchOp CreateLaunchOpForCluster(OpBuilder* builder, return launch_op; } +// Propagates the return from `parallel_execute_op` to parent replicate +// op if it exists. +void PropagateParallelExecuteReturnToReplicate( + tf_device::ParallelExecuteOp parallel_execute_op) { + // Update the return for the parallel_execute op parent. + auto replicate = llvm::dyn_cast_or_null( + parallel_execute_op.getParentOp()); + if (replicate) + replicate.GetBody().getTerminator()->setOperands( + parallel_execute_op.execute_outputs()); +} + // Creates a `parallel_execute` op in place of launch with 'clusters` and // 'launch` as regions. void CreateParallelExecuteFromClusters(tf_device::LaunchOp launch, @@ -111,14 +123,8 @@ void CreateParallelExecuteFromClusters(tf_device::LaunchOp launch, // Create parallel_execute regions. The original TPU cluster computation // is the extra region. int num_regions = 1 + clusters.size(); - // TODO(b/154363171): Correctly determine output_types. Add tests to confirm - // that the types for parallel_execute_op match the concatenated output - // types of the contained regions. - // TODO(b/154363171): Remap the results of the `launch` op to use the - // results of the `parallel_execute` op. 
- llvm::SmallVector concatenated_output_types; auto parallel_execute_op = builder.create( - launch.getLoc(), num_regions, concatenated_output_types); + launch.getLoc(), num_regions, launch.results().getTypes()); // Move outside compilation clusters to parallel_execute regions. for (const auto& cluster : llvm::enumerate(clusters)) { @@ -131,7 +137,10 @@ void CreateParallelExecuteFromClusters(tf_device::LaunchOp launch, CreateLaunchOpForCluster(&builder, cluster_ops.back()); MoveClusterOpsToLaunchOp(launch_op, cluster_ops); builder.setInsertionPointToEnd(&outside_block); - builder.create(launch.getLoc(), launch.getResults()); + // TODO(b/154363171): Handle returns from OutsideCompiled parallel_execute + // regions either through communication with TPU parallel_execute regions + // or modifying parallel_execute returns. + builder.create(launch.getLoc(), ArrayRef{}); } // Move the launch body to last parallel_execute block. @@ -140,6 +149,11 @@ void CreateParallelExecuteFromClusters(tf_device::LaunchOp launch, builder.setInsertionPointToEnd(&inside_block); builder.create(launch.getLoc(), launch.getResults()); launch.getOperation()->moveBefore(inside_block.getTerminator()); + + PropagateParallelExecuteReturnToReplicate(parallel_execute_op); + // TODO(b/154363171): Handle returns from OutsideCompiled parallel_execute + // regions either through communication with TPU parallel_execute regions + // or modifying parallel_execute returns. 
} void TPUExtractOutsideCompilation::runOnFunction() { diff --git a/tensorflow/compiler/mlir/tensorflow/translate/breakup-islands.cc b/tensorflow/compiler/mlir/tensorflow/translate/breakup-islands.cc index 46be9464b83..3245e3b9e6a 100644 --- a/tensorflow/compiler/mlir/tensorflow/translate/breakup-islands.cc +++ b/tensorflow/compiler/mlir/tensorflow/translate/breakup-islands.cc @@ -113,7 +113,7 @@ void BreakUpIslands::runOnFunction() { state.addOperands(operands); Operation* new_op = builder.createOperation(state); item.replaceAllUsesWith(new_op); - new_op->setAttrs(item.getAttrList()); + new_op->setAttrs(item.getMutableAttrDict()); item.erase(); } } diff --git a/tensorflow/compiler/mlir/tensorflow/translate/control_to_executor_dialect.cc b/tensorflow/compiler/mlir/tensorflow/translate/control_to_executor_dialect.cc index b5ebd45936a..9aeaa0ba318 100644 --- a/tensorflow/compiler/mlir/tensorflow/translate/control_to_executor_dialect.cc +++ b/tensorflow/compiler/mlir/tensorflow/translate/control_to_executor_dialect.cc @@ -167,7 +167,7 @@ void ControlToExecutorDialectConversion::runOnFunction() { op.getResult(0).replaceAllUsesWith(replacement->getResult(0)); for (int i : llvm::seq(1, op.getNumResults())) op.getResult(i).replaceAllUsesWith(replacement->getResult(i + 1)); - replacement->setAttrs(op.getAttrList()); + replacement->setAttrs(op.getMutableAttrDict()); op.erase(); continue; } else if (op.getName().getStringRef() == "_tf.NextIteration.sink") { @@ -177,7 +177,7 @@ void ControlToExecutorDialectConversion::runOnFunction() { frame_name_to_loop[frame.getValue()]; replacement = builder.create( loc, srcOp.token(), operands, ArrayRef{}); - replacement->setAttrs(op.getAttrList()); + replacement->setAttrs(op.getMutableAttrDict()); op.erase(); continue; } else if (op.getName().getStringRef() == "_tf.LoopCond") { @@ -220,7 +220,7 @@ void ControlToExecutorDialectConversion::runOnFunction() { // Create the operation inside the island OpBuilder island_builder = 
OpBuilder::atBlockEnd(&island.GetBody()); Operation *inner_op = island_builder.createOperation(result); - inner_op->setAttrs(op.getAttrList()); + inner_op->setAttrs(op.getMutableAttrDict()); // Add the terminator for the island SmallVector ret_vals(inner_op->getResults()); @@ -230,7 +230,7 @@ void ControlToExecutorDialectConversion::runOnFunction() { // Copy the attributes from the original operation to the replacement and // remap the results. if (!isa(replacement)) - replacement->setAttrs(op.getAttrList()); + replacement->setAttrs(op.getMutableAttrDict()); for (int i : llvm::seq(0, op.getNumResults())) op.getResult(i).replaceAllUsesWith(replacement->getResult(i)); op.erase(); diff --git a/tensorflow/compiler/mlir/tensorflow/translate/executor_to_control_dialect.cc b/tensorflow/compiler/mlir/tensorflow/translate/executor_to_control_dialect.cc index 7d0b75006a7..481f1fac7b8 100644 --- a/tensorflow/compiler/mlir/tensorflow/translate/executor_to_control_dialect.cc +++ b/tensorflow/compiler/mlir/tensorflow/translate/executor_to_control_dialect.cc @@ -136,7 +136,7 @@ void ExecutorToControlDialectConversion::runOnFunction() { // Create the replacement operation. auto *replacement = builder.createOperation(state); - replacement->setAttrs(wrapped_op.getAttrList()); + replacement->setAttrs(wrapped_op.getMutableAttrDict()); for (auto ops_and_ret_vals : llvm::zip(wrapped_op.getResults(), replacement->getResults())) @@ -208,7 +208,7 @@ void ExecutorToControlDialectConversion::runOnFunction() { // Create the replacement operation. 
auto *replacement = builder.createOperation(state); - replacement->setAttrs(op.getAttrList()); + replacement->setAttrs(op.getMutableAttrDict()); if (auto next_iteration = dyn_cast(op)) { diff --git a/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util.cc b/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util.cc index c22e86e08de..b891682366b 100644 --- a/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util.cc +++ b/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util.cc @@ -258,7 +258,8 @@ Status ConvertMLIRToXlaComputation( mlir::ModuleOp module_op, llvm::StringRef device_type, xla::XlaComputation* xla_computation, bool use_tuple_args, bool return_tuple, - const XlaCompiler::ShapeRepresentationFn shape_representation_fn) { + const XlaCompiler::ShapeRepresentationFn shape_representation_fn, + std::vector> custom_legalization_passes) { mlir::PassManager tf2xla(module_op.getContext()); // Mark main function as public, and other functions as private. tf2xla.addPass( @@ -277,7 +278,11 @@ Status ConvertMLIRToXlaComputation( tf2xla.addPass(mlir::xla_hlo::createLegalizeTFControlFlowPass()); tf2xla.addNestedPass(mlir::xla_hlo::createLegalizeTFPass(true)); + for (auto& target_pass : custom_legalization_passes) { + tf2xla.addNestedPass(std::move(target_pass)); + } tf2xla.addNestedPass(mlir::createCanonicalizerPass()); + tf2xla.addPass(mlir::TF::CreateTFShapeInferencePass()); // Leverage tf2xla kernels for ops that didn't get lowered in the previous // legalization pass. 
@@ -324,7 +329,8 @@ static Status CompileMlirToXlaHlo( mlir::ModuleOp module_op, llvm::ArrayRef arg_shapes, llvm::StringRef device_type, bool use_tuple_args, XlaCompiler::ShapeRepresentationFn shape_representation_fn, - XlaCompiler::CompilationResult* compilation_result) { + XlaCompiler::CompilationResult* compilation_result, + std::vector> custom_legalization_passes) { if (VLOG_IS_ON(1)) tensorflow::DumpMlirOpToFile("mlir_compile_before", module_op); @@ -342,7 +348,8 @@ static Status CompileMlirToXlaHlo( TF_RETURN_IF_ERROR(ConvertMLIRToXlaComputation( module_op, device_type, compilation_result->computation.get(), use_tuple_args, - /*return_tuple=*/true, shape_representation_fn)); + /*return_tuple=*/true, shape_representation_fn, + std::move(custom_legalization_passes))); // Construct mapping from XlaComputation's arg to input edges of execute // node. @@ -372,7 +379,8 @@ Status CompileSerializedMlirToXlaHlo( llvm::StringRef mlir_module_string, llvm::ArrayRef arg_shapes, llvm::StringRef device_type, bool use_tuple_args, const XlaCompiler::ShapeRepresentationFn shape_representation_fn, - XlaCompiler::CompilationResult* compilation_result) { + XlaCompiler::CompilationResult* compilation_result, + std::vector> custom_legalization_passes) { RegisterDialects(); mlir::MLIRContext mlir_context; mlir::OwningModuleRef mlir_module; @@ -381,7 +389,8 @@ Status CompileSerializedMlirToXlaHlo( ParseMlirModule(mlir_module_string, &mlir_context, &mlir_module)); return CompileMlirToXlaHlo(mlir_module.get(), arg_shapes, device_type, use_tuple_args, shape_representation_fn, - compilation_result); + compilation_result, + std::move(custom_legalization_passes)); } Status CompileGraphToXlaHlo( @@ -389,7 +398,8 @@ Status CompileGraphToXlaHlo( llvm::StringRef device_type, bool use_tuple_args, const FunctionLibraryDefinition& flib_def, const GraphDebugInfo& debug_info, const XlaCompiler::ShapeRepresentationFn shape_representation_fn, - XlaCompiler::CompilationResult* compilation_result) { + 
XlaCompiler::CompilationResult* compilation_result, + std::vector> custom_legalization_passes) { RegisterDialects(); mlir::MLIRContext context; GraphImportConfig config; @@ -400,7 +410,8 @@ Status CompileGraphToXlaHlo( return CompileMlirToXlaHlo(module_or.ValueOrDie().get(), arg_shapes, device_type, use_tuple_args, - shape_representation_fn, compilation_result); + shape_representation_fn, compilation_result, + std::move(custom_legalization_passes)); } } // namespace tensorflow diff --git a/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util.h b/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util.h index 74c602a7afb..0218efb83c6 100644 --- a/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util.h +++ b/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util.h @@ -19,6 +19,7 @@ limitations under the License. #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringRef.h" #include "mlir/IR/Module.h" // from @llvm-project +#include "mlir/Pass/Pass.h" // from @llvm-project #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/protobuf/graph_debug_info.pb.h" @@ -50,11 +51,14 @@ namespace tensorflow { // shape_representation_fn: when this is set, this shape representation function // will be used to determine argument and result shapes. Otherwise the // original shape will be used as is. +// custom_legalization_passes: passes to run before the default TF legalization +// passes for backend-specific ops. 
Status ConvertMLIRToXlaComputation( mlir::ModuleOp module_op, llvm::StringRef device_type, xla::XlaComputation* xla_computation, bool use_tuple_args, bool return_tuple, - const XlaCompiler::ShapeRepresentationFn shape_representation_fn = nullptr); + const XlaCompiler::ShapeRepresentationFn shape_representation_fn = nullptr, + std::vector> custom_legalization_passes = {}); // Compiles a serialized MLIR module into XLA HLO, generates all accompanying // metadata and stores them in CompilationResult. @@ -62,7 +66,8 @@ Status CompileSerializedMlirToXlaHlo( llvm::StringRef mlir_module_string, llvm::ArrayRef arg_shapes, llvm::StringRef device_type, bool use_tuple_args, const XlaCompiler::ShapeRepresentationFn shape_representation_fn, - XlaCompiler::CompilationResult* compilation_result); + XlaCompiler::CompilationResult* compilation_result, + std::vector> custom_legalization_passes = {}); // Same as the above but takes input as TensorFlow Graph. Status CompileGraphToXlaHlo( @@ -70,7 +75,8 @@ Status CompileGraphToXlaHlo( llvm::StringRef device_type, bool use_tuple_args, const FunctionLibraryDefinition& flib_def, const GraphDebugInfo& debug_info, const XlaCompiler::ShapeRepresentationFn shape_representation_fn, - XlaCompiler::CompilationResult* compilation_result); + XlaCompiler::CompilationResult* compilation_result, + std::vector> custom_legalization_passes = {}); } // namespace tensorflow diff --git a/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util_test.cc b/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util_test.cc index 26c50a24f58..118af434629 100644 --- a/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util_test.cc +++ b/tensorflow/compiler/mlir/tensorflow/utils/compile_mlir_util_test.cc @@ -252,6 +252,37 @@ TEST(CompileSerializedMlirToXlaHloTest, ShapeInference) { ::testing::HasSubstr(expected_signature)); } +TEST(CompileSerializedMlirToXlaHloTest, ShapeInferenceAfterLegalization) { + constexpr char mlir_module[] = R"( + module 
attributes {tf.versions = {producer = 179 : i32}} { + func @main(%arg0: tensor<8x16x16x64xbf16>, %arg1: tensor<64xf32>) -> (tensor<8x16x16x64xbf16>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<*xf32>) { + %0:6 = "tf.FusedBatchNormV3"(%arg0, %arg1, %arg1, %arg1, %arg1) {data_format = "NHWC", device = "", epsilon = 9.99999974E-5 : f32, exponential_avg_factor = 1.000000e+00 : f32, is_training = false} : (tensor<8x16x16x64xbf16>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>) -> (tensor<8x16x16x64xbf16>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<*xf32>) + return %0#0, %0#1, %0#2, %0#3, %0#4, %0#5 : tensor<8x16x16x64xbf16>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<*xf32> + } + } + )"; + + std::vector arg_shapes{TensorShape({8, 16, 16, 64}), + TensorShape({64})}; + XlaCompiler::CompilationResult compilation_result; + + Status s = CompileSerializedMlirToXlaHlo( + mlir_module, arg_shapes, "XLA_CPU_JIT", + /*use_tuple_args=*/true, TestShapeRepresentation, &compilation_result); + TF_ASSERT_OK(s); + + const xla::HloModuleConfig module_config( + compilation_result.computation->GetProgramShape().ValueOrDie()); + auto status_or_hlo_module = xla::HloModule::CreateFromProto( + compilation_result.computation->proto(), module_config); + TF_ASSERT_OK(status_or_hlo_module.status()); + + constexpr char expected_signature[] = + R"(-> (bf16[8,16,16,64], f32[64], f32[64], f32[64], f32[64], f32[0]))"; + EXPECT_THAT(status_or_hlo_module.ValueOrDie()->ToString(), + ::testing::HasSubstr(expected_signature)); +} + TEST(CompileSerializedMlirToXlaHloTest, ConstantFoldHook) { constexpr char mlir_module[] = R"( module attributes {tf.versions = {producer = 179 : i32}} { diff --git a/tensorflow/compiler/mlir/xla/BUILD b/tensorflow/compiler/mlir/xla/BUILD index eb87d43f7d2..598383d81ec 100644 --- a/tensorflow/compiler/mlir/xla/BUILD +++ b/tensorflow/compiler/mlir/xla/BUILD @@ -14,6 +14,7 @@ 
package_group( "//learning/brain/experimental/dtensor/...", "//learning/brain/experimental/mlir/...", "//learning/brain/google/xla/kernels/...", + "//learning/brain/google/xla/mlir/...", "//learning/brain/swift/swift_mlir/...", "//learning/pathways/data_parallel/tf2xla/...", "//platforms/xla/...", diff --git a/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc b/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc index 7fb373618b6..c9742ad5337 100644 --- a/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc +++ b/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc @@ -30,6 +30,7 @@ limitations under the License. #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" +#include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/MathExtras.h" @@ -810,9 +811,53 @@ OpFoldResult RealOp::fold(ArrayRef operands) { // ConcatenateOp //===----------------------------------------------------------------------===// +namespace { +class ConcatenateOperandRemoval : public OpRewritePattern { + public: + using OpRewritePattern::OpRewritePattern; + LogicalResult matchAndRewrite(ConcatenateOp op, + PatternRewriter& rewriter) const override { + auto axis = op.dimension().getLimitedValue(); + llvm::SmallVector new_operands; + for (auto operand : op.getOperands()) { + auto ty = operand.getType().cast(); + if (ty.getDimSize(axis) != 0) { + new_operands.push_back(operand); + } + } + + if (!new_operands.empty() && new_operands.size() < op.getNumOperands()) { + rewriter.replaceOpWithNewOp(op, op.getResult().getType(), + new_operands, op.dimension()); + return success(); + } + + return failure(); + } +}; +} // namespace + +void ConcatenateOp::getCanonicalizationPatterns( + OwningRewritePatternList& results, MLIRContext* context) { + results.insert(context); +} + OpFoldResult ConcatenateOp::fold(ArrayRef operands) { if (getNumOperands() == 1) return getOperand(0); - return {}; + + ShapedType type = 
getResult().getType().cast(); + if (!type.hasStaticShape()) return {}; + + auto axis = dimension().getLimitedValue(); + llvm::SmallVector new_operands; + for (auto operand : getOperands()) { + auto ty = operand.getType().cast(); + if (ty.getDimSize(axis) != 0) { + return {}; + } + } + + return DenseElementsAttr::get(type, ArrayRef()); } static LogicalResult Verify(ConcatenateOp op) { @@ -1381,6 +1426,89 @@ void SliceOp::build(OpBuilder& builder, OperationState& result, Value operand, operand, start_indices, limit_indices, strides); } +template +static void SliceElements(I values, ArrayRef sizes, + ArrayRef starts, ArrayRef limits, + ArrayRef strides, + llvm::SmallVectorImpl* out_values) { + assert(starts.size() == limits.size()); + assert(starts.size() == strides.size()); + if (starts.empty()) return; + + int64_t start = starts.front(); + int64_t limit = limits.front(); + int64_t stride = strides.front(); + if (starts.size() == 1) { + for (int i = start; i < limit; i += stride) { + out_values->push_back(*(values + i)); + } + return; + } + + for (; start < limit; start += stride) { + auto begin = values + start * sizes.front(); + SliceElements(begin, sizes.drop_front(), starts.drop_front(), + limits.drop_front(), strides.drop_front(), out_values); + } +} + +template +static Attribute FoldSlice(SliceOp* op, I values) { + auto start = llvm::to_vector<6>(op->start_indices().getValues()); + auto limit = llvm::to_vector<6>(op->limit_indices().getValues()); + auto stride = llvm::to_vector<6>(op->strides().getValues()); + + auto result_type = op->operand().getType().cast(); + if (!result_type.hasStaticShape()) return {}; + + auto shape = result_type.getShape(); + int64_t count = result_type.getNumElements(); + // Compute the striding for each dimension. 
+ llvm::SmallVector sizes; + sizes.reserve(shape.size()); + for (auto v : shape) { + count = count / v; + sizes.push_back(count); + } + + llvm::SmallVector out_values; + out_values.reserve(result_type.getNumElements()); + SliceElements(values, sizes, start, limit, stride, &out_values); + + return DenseElementsAttr::get(op->getResult().getType().cast(), + out_values); +} + +OpFoldResult SliceOp::fold(ArrayRef operands) { + // Check if the SliceOp is a NoOp operation. + auto operand_shape = getOperand().getType().cast().getShape(); + auto result_type = getResult().getType().cast(); + auto result_shape = result_type.getShape(); + + if (result_type.hasStaticShape() && (operand_shape == result_shape)) { + return getOperand(); + } + + if (operands.empty() || !operands.front()) return {}; + + // Evaluate for statically valued inputs. + DenseElementsAttr elements = operands.front().dyn_cast(); + if (!elements) return {}; + + auto etype = elements.getType().getElementType(); + if (etype.isa()) { + return FoldSlice( + this, elements.getIntValues().begin()); + } else if (etype.isa()) { + return FoldSlice< + llvm::mapped_iterator>, + APFloat>(this, elements.getFloatValues().begin()); + } + + return {}; +} + // Returns output dimension size for slice result for the given arguments. // Returns -1 if arguments are illegal. 
static int64_t InferSliceDim(int64_t input_dim, int64_t start, int64_t end, diff --git a/tensorflow/compiler/mlir/xla/ir/hlo_ops.td b/tensorflow/compiler/mlir/xla/ir/hlo_ops.td index a90b628c3d8..16c9a7b4f05 100644 --- a/tensorflow/compiler/mlir/xla/ir/hlo_ops.td +++ b/tensorflow/compiler/mlir/xla/ir/hlo_ops.td @@ -647,6 +647,8 @@ def HLO_SliceOp: HLO_Op< let results = (outs HLO_Tensor); + let hasFolder = 1; + let builders = [OpBuilder< "OpBuilder &builder, OperationState &result, Value operand, " "DenseIntElementsAttr start_indices, DenseIntElementsAttr limit_indices, " @@ -845,6 +847,7 @@ def HLO_ConcatenateOp : HLO_Op<"concatenate", let results = (outs HLO_Tensor); + let hasCanonicalizer = 1; let hasFolder = 1; } diff --git a/tensorflow/compiler/mlir/xla/ir/hlo_ops_base.td b/tensorflow/compiler/mlir/xla/ir/hlo_ops_base.td index 44e0abab031..c087ffd1f40 100644 --- a/tensorflow/compiler/mlir/xla/ir/hlo_ops_base.td +++ b/tensorflow/compiler/mlir/xla/ir/hlo_ops_base.td @@ -64,14 +64,14 @@ def HLO_TensorOrTuple : AnyTypeOf<[HLO_Tensor, HLO_Tuple]>; // Dynamic representation of a shape vector as a tensor. def HLO_DimensionTensor : ShapedContainerType< - [Index, AnySignlessInteger], + [Index, HLO_Pred, HLO_Int], And<[IsTensorTypePred, HasAnyRankOfPred<[1]>]>, "a 1D tensor of dimensions">; // In general, static shaped tensor constraints should be avoided unless // it is for a legacy op which is only correct with static shapes. def HLO_StaticShapeTensor : StaticShapeTensorOf<[ - AnyFloat, AnySignlessInteger, HLO_Complex]>; + AnyFloat, HLO_Pred, HLO_Int, HLO_Complex]>; //===----------------------------------------------------------------------===// // XLA on tensors combined type definitions. 
diff --git a/tensorflow/compiler/mlir/xla/tests/canonicalize.mlir b/tensorflow/compiler/mlir/xla/tests/canonicalize.mlir index 0b91613a7f9..5f28693c49d 100644 --- a/tensorflow/compiler/mlir/xla/tests/canonicalize.mlir +++ b/tensorflow/compiler/mlir/xla/tests/canonicalize.mlir @@ -1,5 +1,50 @@ // RUN: xla-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s --dump-input-on-failure +// CHECK-LABEL: concatenate_noop +func @concatenate_noop(%arg0: tensor<4xi32>) -> tensor<4xi32> { + // CHECK-SAME: [[ARG:%.+]]: tensor<4xi32> + %0 = "xla_hlo.concatenate"(%arg0) { dimension = 0 : i64 } : (tensor<4xi32>) -> tensor<4xi32> + + // CHECK: return [[ARG]] + return %0 : tensor<4xi32> +} + +// CHECK-LABEL: concatenate_remove_operand +func @concatenate_remove_operand(%arg0: tensor<4xi32>, %arg1: tensor<0xi32>) -> tensor<4xi32> { + // CHECK-SAME: [[ARG0:%.+]]: tensor<4xi32> + // CHECK-SAME: [[ARG1:%.+]]: tensor<0xi32> + %0 = "xla_hlo.concatenate"(%arg0, %arg1) { dimension = 0 : i64 } : (tensor<4xi32>, tensor<0xi32>) -> tensor<4xi32> + + // CHECK: return [[ARG0]] + return %0 : tensor<4xi32> +} + +// CHECK-LABEL: concatenate_empty_bool +func @concatenate_empty_bool(%arg0: tensor<0xi1>, %arg1: tensor<0xi1>) -> tensor<0xi1> { + // CHECK: xla_hlo.constant + %0 = "xla_hlo.concatenate"(%arg0, %arg1) { dimension = 0 : i64 } : (tensor<0xi1>, tensor<0xi1>) -> tensor<0xi1> + + return %0 : tensor<0xi1> +} + +// CHECK-LABEL: concatenate_empty_int +func @concatenate_empty_int(%arg0: tensor<0xi32>, %arg1: tensor<0xi32>) -> tensor<0xi32> { + // CHECK: xla_hlo.constant + %0 = "xla_hlo.concatenate"(%arg0, %arg1) { dimension = 0 : i64 } : (tensor<0xi32>, tensor<0xi32>) -> tensor<0xi32> + + return %0 : tensor<0xi32> +} + +// CHECK-LABEL: concatenate_empty_float +func @concatenate_empty_float(%arg0: tensor<0xf32>, %arg1: tensor<0xf32>) -> tensor<0xf32> { + // CHECK: xla_hlo.constant + %0 = "xla_hlo.concatenate"(%arg0, %arg1) { dimension = 0 : i64 } : (tensor<0xf32>, tensor<0xf32>) -> tensor<0xf32> 
+ + return %0 : tensor<0xf32> +} + + +// CHECK-LABEL: dynamic_slice_variable_start func @dynamic_slice_variable_start(%arg0: tensor<3x4xi32>, %arg1: tensor, %arg2: tensor) -> tensor<1x4xi32> { // CHECK: "xla_hlo.dynamic-slice" %1 = "xla_hlo.dynamic-slice"(%arg0, %arg1, %arg2) {slice_sizes = dense<[1, 4]> : tensor<2xi64>} : (tensor<3x4xi32>, tensor, tensor) -> tensor<1x4xi32> @@ -31,6 +76,70 @@ func @dynamic_slice_constant_start_dynamic_shape(%arg0: tensor, %arg1: return %2 : tensor } +// CHECK-LABEL: slice_2D_noop +// CHECK-SAME: [[ARG:%.+]]: tensor<2x2xi64> +func @slice_2D_noop(%arg0: tensor<2x2xi64>) -> tensor<2x2xi64> { + %0 = "xla_hlo.slice"(%arg0) { limit_indices = dense<[2, 2]> : tensor<2xi64>, start_indices = dense<[0, 0]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<2x2xi64>) -> (tensor<2x2xi64>) + + // CHECK-NEXT: return [[ARG]] + return %0 : tensor<2x2xi64> +} + +// CHECK-LABEL: slice_1D_fold +func @slice_1D_fold() -> tensor<2xi64> { + %0 = xla_hlo.constant dense<[5, 7, 9, 10]> : tensor<4xi64> + // CHECK: xla_hlo.constant dense<[7, 9]> + %1 = "xla_hlo.slice"(%0) { limit_indices = dense<[3]> : tensor<1xi64>, start_indices = dense<[1]> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>} : (tensor<4xi64>) -> (tensor<2xi64>) + return %1 : tensor<2xi64> +} + +// CHECK-LABEL: slice_1D_fp +func @slice_1D_fp() -> tensor<2xf32> { + %0 = xla_hlo.constant dense<[5.0, 7.0, 9.0, 10.0]> : tensor<4xf32> + // CHECK: xla_hlo.constant dense<[7.000000e+00, 9.000000e+00]> + %1 = "xla_hlo.slice"(%0) { limit_indices = dense<[3]> : tensor<1xi64>, start_indices = dense<[1]> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>} : (tensor<4xf32>) -> (tensor<2xf32>) + return %1 : tensor<2xf32> +} + +// CHECK-LABEL: slice_1D_strided_fold +func @slice_1D_strided_fold() -> tensor<2xi64> { + %0 = xla_hlo.constant dense<[5, 7, 9, 10]> : tensor<4xi64> + // CHECK: xla_hlo.constant dense<[7, 10]> + %1 = "xla_hlo.slice"(%0) { limit_indices = dense<[4]> : tensor<1xi64>, 
start_indices = dense<[1]> : tensor<1xi64>, strides = dense<2> : tensor<1xi64>} : (tensor<4xi64>) -> (tensor<2xi64>) + return %1 : tensor<2xi64> +} + +// CHECK-LABEL: slice_2D_fold +func @slice_2D_fold() -> tensor<2x2xi64> { + %0 = xla_hlo.constant dense<[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]> : tensor<4x4xi64> + // CHECK-NEXT: xla_hlo.constant dense<[ + // CHECK-SAME: [6, 7], + // CHECK-SAME: [10, 11] + // CHECK-SAME: ]> + %1 = "xla_hlo.slice"(%0) { limit_indices = dense<[3, 4]> : tensor<2xi64>, start_indices = dense<[1, 2]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<4x4xi64>) -> (tensor<2x2xi64>) + return %1 : tensor<2x2xi64> +} + +// CHECK-LABEL: slice_2D_fold_horizontal +func @slice_2D_fold_horizontal() -> tensor<1x4xi64> { + %0 = xla_hlo.constant dense<[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]> : tensor<4x4xi64> + // CHECK-NEXT: xla_hlo.constant dense<[ + // CHECK-SAME: [0, 1, 2, 3] + // CHECK-SAME: ]> + %1 = "xla_hlo.slice"(%0) { limit_indices = dense<[1, 4]> : tensor<2xi64>, start_indices = dense<[0, 0]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<4x4xi64>) -> (tensor<1x4xi64>) + return %1 : tensor<1x4xi64> +} + +// CHECK-LABEL: slice_2D_fold_vertical +func @slice_2D_fold_vertical() -> tensor<4x1xi64> { + %0 = xla_hlo.constant dense<[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]> : tensor<4x4xi64> + // CHECK-NEXT: xla_hlo.constant dense<[ + // CHECK-SAME: [2], [6], [10], [14] + // CHECK-SAME: ]> + %1 = "xla_hlo.slice"(%0) { limit_indices = dense<[4, 3]> : tensor<2xi64>, start_indices = dense<[0, 2]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<4x4xi64>) -> (tensor<4x1xi64>) + return %1 : tensor<4x1xi64> +} + // CHECK-LABEL: func @broadcast_in_dim_identity func @broadcast_in_dim_identity(%arg0: tensor<2x3x4xf32>) -> tensor<2x3x4xf32> { // CHECK: return %arg0 diff --git a/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc 
b/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc index 37eb31d0ade..a6a6829b109 100644 --- a/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc +++ b/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc @@ -1749,10 +1749,12 @@ class ConvertSigmoidOp : public OpRewritePattern { op.getLoc(), rewriter.getFloatAttr(getElementTypeOrSelf(operand.getType()), 0.5)); - auto shaped_type = operand.getType().cast(); + auto type = operand.getType().dyn_cast(); + if (!type) + return rewriter.notifyMatchFailure(op, "requires ranked tensor type"); auto constant_ones = rewriter.create( - op.getLoc(), shaped_type, scalar_one, - GetI64ElementsAttr(shaped_type.getShape(), &rewriter)); + op.getLoc(), type, scalar_one, + GetI64ElementsAttr(type.getShape(), &rewriter)); auto scaled_input = rewriter.create( op.getLoc(), operand, constant_ones, DenseIntElementsAttr()); diff --git a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_with_tf2xla.cc b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_with_tf2xla.cc index e1edd80a93d..25bdd0f5f62 100644 --- a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_with_tf2xla.cc +++ b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_with_tf2xla.cc @@ -78,29 +78,53 @@ static bool IsOpWhitelisted(Operation* op) { // building valid MLIR using MlirHloBuilder. // TODO(hinsu): Drop explicit whitelist when MLIR based bridge is enabled for // all tf2xla kernels. 
+ // clang-format off static llvm::SmallDenseSet ops = { - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get(), TypeID::get(), - TypeID::get()}; + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get(), + TypeID::get() + }; + // clang-format on auto* abstractOp = op->getAbstractOperation(); if (!abstractOp) return false; diff --git a/tensorflow/compiler/mlir/xla/transforms/xla_hlo_to_lhlo_with_xla.cc b/tensorflow/compiler/mlir/xla/transforms/xla_hlo_to_lhlo_with_xla.cc index aba81886713..ee75ceac2d1 100644 --- a/tensorflow/compiler/mlir/xla/transforms/xla_hlo_to_lhlo_with_xla.cc +++ b/tensorflow/compiler/mlir/xla/transforms/xla_hlo_to_lhlo_with_xla.cc @@ -223,7 +223,7 @@ Status LhloDialectEmitter::Run() { // The function signature 
will be composed of: // - one memref for each of the parameters. // - one memref for each other buffer allocation. - llvm::SmallVector args_attrs; + llvm::SmallVector args_attrs; for (const HloInstruction* param : computation->parameter_instructions()) { TF_ASSIGN_OR_RETURN(auto arg_type, ::xla::ConvertShapeToType( param->shape(), builder_)); diff --git a/tensorflow/compiler/xla/python/bfloat16.cc b/tensorflow/compiler/xla/python/bfloat16.cc index 2758a941700..e48475b7a85 100644 --- a/tensorflow/compiler/xla/python/bfloat16.cc +++ b/tensorflow/compiler/xla/python/bfloat16.cc @@ -338,12 +338,15 @@ Py_hash_t PyBfloat16_Hash(PyObject* self) { // Python type for PyBfloat16 objects. PyTypeObject PyBfloat16_Type = { - PyVarObject_HEAD_INIT(nullptr, 0) - "bfloat16", // tp_name - sizeof(PyBfloat16), // tp_basicsize - 0, // tp_itemsize - nullptr, // tp_dealloc - 0, // tp_print NOLINT + PyVarObject_HEAD_INIT(nullptr, 0) "bfloat16", // tp_name + sizeof(PyBfloat16), // tp_basicsize + 0, // tp_itemsize + nullptr, // tp_dealloc +#if PY_VERSION_HEX < 0x03080000 + nullptr, // tp_print +#else + 0, // tp_vectorcall_offset +#endif nullptr, // tp_getattr nullptr, // tp_setattr nullptr, // tp_compare / tp_reserved diff --git a/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.py b/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.py index 88d5572555b..ef0caff0ae6 100644 --- a/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.py +++ b/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.py @@ -20,28 +20,18 @@ from __future__ import print_function from absl import logging -from tensorflow.compiler.xla.python import xla_client -from tensorflow.compiler.xla.python import xla_extension as _xla from tensorflow.compiler.xla.python.tpu_driver.client import tpu_client_extension as _tpu_client -class TpuBackend(xla_client.Backend): +class TpuBackend(object): """XLA backend implemented using the Tpu driver API.""" # Cache the backends to prevent double driver 
initializations. _local_backend = None - def __init__(self, client): - """Creates a new TpuBackend. - - Args: - client: A _tpu_client.TpuClient object. - """ - super(TpuBackend, self).__init__('tpu') - self.client = client - @staticmethod def create(worker=None, force=False): + """Constructs a Cloud TPU backend.""" # `force` == True will skip caching any backends (if applicable) and will # always try to create a new client. if worker is None: @@ -56,52 +46,11 @@ class TpuBackend(xla_client.Backend): if worker == 'local': worker = 'local://' if force: - return TpuBackend(_tpu_client.TpuClient.Get(worker)) + return _tpu_client.TpuClient.Get(worker) if TpuBackend._local_backend is None: logging.info('Starting the local TPU driver.') - TpuBackend._local_backend = TpuBackend( - _tpu_client.TpuClient.Get(worker)) + TpuBackend._local_backend = _tpu_client.TpuClient.Get(worker) return TpuBackend._local_backend else: # We do not cache for non-local backends. - return TpuBackend(_tpu_client.TpuClient.Get(worker)) - - def device_count(self): - return self.client.device_count() - - def local_device_count(self): - return self.client.local_device_count() - - def local_devices(self): - return self.client.local_devices() - - def devices(self): - return self.client.devices() - - def host_id(self): - return self.client.host_id() - - def buffer_from_pyval(self, pyval, device=None, force_copy=False): - return self.client.buffer_from_pyval(pyval, device) - - def compile(self, c_computation, compile_options=None): - compile_options = compile_options or xla_client.CompileOptions() - options = _xla.CompileOptions() - options.argument_layouts = compile_options.argument_layouts - options.parameter_is_tupled_arguments = compile_options.tuple_arguments - build_options = options.executable_build_options - build_options.num_replicas = compile_options.num_replicas - build_options.num_partitions = compile_options.num_partitions - if compile_options.result_layout: - build_options.result_layout = 
compile_options.result_layout - if compile_options.device_assignment: - build_options.device_assignment = compile_options.device_assignment - return self.client.compile(c_computation, options) - - def get_default_device_assignment(self, num_replicas, num_partitions=None): - if num_partitions is not None: - return self.client.get_default_device_assignment(num_replicas, - num_partitions) - else: - # TODO(henrytan): delete this case after all callers can handle 2D output - return self.client.get_default_device_assignment(num_replicas) + return _tpu_client.TpuClient.Get(worker) diff --git a/tensorflow/compiler/xla/python/xla_client.py b/tensorflow/compiler/xla/python/xla_client.py index 62d31981880..7f09a7e1698 100644 --- a/tensorflow/compiler/xla/python/xla_client.py +++ b/tensorflow/compiler/xla/python/xla_client.py @@ -19,7 +19,6 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import abc import collections import enum # pylint: disable=g-bad-import-order import inspect @@ -47,114 +46,6 @@ ops = _xla.ops profiler = _xla.profiler -class Backend(object, metaclass=abc.ABCMeta): - """Abstract base class for XLA backends.""" - - def __init__(self, platform): - """Creates a new Backend. - - Args: - platform: A string naming the platform; for example 'gpu'. 
- """ - self.platform = platform - - @abc.abstractmethod - def device_count(self): - """Returns the number of devices known to the backend.""" - - @abc.abstractmethod - def local_device_count(self): - """Returns the number of devices local to this host.""" - - @abc.abstractmethod - def devices(self): - """Returns a list of `device_count()` Device subclasses.""" - - @abc.abstractmethod - def host_id(self): - """Returns the integer ID of this host.""" - - @abc.abstractmethod - def buffer_from_pyval(self, pyval, device=None, force_copy=False): - """Allocates a fresh buffer and populates it with `pyval`.""" - - @abc.abstractmethod - def compile(self, computation, compile_options=None): - """Compiles a computation. Returns an executable.""" - - @abc.abstractmethod - def get_default_device_assignment(self, num_replicas, num_partitions): - """Returns the default device assignment that `compile` would use. - - If `compile_options.device_assignment` isn't set, `compile` will pick a - deterministic device assignment based on the number of replicas and - partitions, possibly optimizing for device locality. This method returns - that assignment, which is useful for e.g. manually replicating a value - before passing it to a compiled executable. - - Args: - num_replicas: the number of replicas needed. - num_partitions: the number of partitions needed. - - Returns: - A list of list of Devices of size `(num_replicas, num_partitions)`. - """ - - -class LocalBackend(Backend): - """XLA backend implemented using the in-process xla::LocalClient API.""" - - def __init__(self, platform, client): - """Creates a new LocalBackend. - - Args: - platform: A string; the user-visible platform name, e.g. 'gpu'. - client: An _xla.PyLocalClient object. 
- """ - super(LocalBackend, self).__init__(platform) - self.client = client - - def device_count(self): - return self.client.device_count() - - def local_device_count(self): - return self.client.local_device_count() - - def devices(self): - return self.client.devices() - - def local_devices(self): - return self.client.local_devices() - - def host_id(self): - return self.client.host_id() - - def buffer_from_pyval(self, pyval, device=None, force_copy=False): - return self.client.buffer_from_pyval(pyval, device, force_copy) - - def compile(self, c_computation, compile_options=None): - compile_options = compile_options or CompileOptions() - options = _xla.CompileOptions() - options.argument_layouts = compile_options.argument_layouts - options.parameter_is_tupled_arguments = compile_options.tuple_arguments - build_options = options.executable_build_options - build_options.num_replicas = compile_options.num_replicas - build_options.num_partitions = compile_options.num_partitions - if compile_options.result_layout: - build_options.result_layout = compile_options.result_layout - if compile_options.device_assignment: - build_options.device_assignment = compile_options.device_assignment - return self.client.compile(c_computation, options) - - def get_default_device_assignment(self, num_replicas, num_partitions=None): - if num_partitions is not None: - return self.client.get_default_device_assignment(num_replicas, - num_partitions) - else: - # TODO(skye): delete this case after all callers can handle 2D output - return self.client.get_default_device_assignment(num_replicas) - - xla_platform_names = { 'cpu': 'Host', 'gpu': 'CUDA', @@ -162,8 +53,7 @@ xla_platform_names = { def _cpu_backend_factory(): - client = _xla.get_cpu_client(asynchronous=True) - return LocalBackend(platform='cpu', client=client) + return _xla.get_cpu_client(asynchronous=True) def _gpu_backend_factory(distributed_client=None, node_id=0): @@ -186,12 +76,11 @@ def 
_gpu_backend_factory(distributed_client=None, node_id=0): config.memory_fraction = float(memory_fraction) config.preallocate = preallocate not in ('0', 'false', 'False') - client = _xla.get_nvidia_gpu_client( + return _xla.get_nvidia_gpu_client( asynchronous=True, allocator_config=config, distributed_client=distributed_client, node_id=node_id) - return LocalBackend(platform='gpu', client=client) # Backend factories, keyed by user-visible name, in increasing priority order. @@ -480,29 +369,7 @@ def computation_count(): """ Device = _xla.Device - - -class CompileOptions(object): - """Python object for XLA compile options. - - These options can be passed to the 'compile' step when using a local XLA - client. - """ - - def __init__(self): - self.executable_build_options = _xla.ExecutableBuildOptions() - self.xla_dump_to = None - self.dump_hlo_pass_re = None - self.dump_hlo_module_re = None - self.dump_hlo_as_text = None - self.dump_hlo_as_proto = None - self.hlo_profile = None - self.num_replicas = 1 - self.num_partitions = 1 - self.argument_layouts = None - self.result_layout = None - self.device_assignment = None - self.tuple_arguments = False +CompileOptions = _xla.CompileOptions # An Executable is a C++ class that duck types with the following API: diff --git a/tensorflow/compiler/xla/python/xla_client_test.py b/tensorflow/compiler/xla/python/xla_client_test.py index 9f795d11d8d..62b3fae018a 100644 --- a/tensorflow/compiler/xla/python/xla_client_test.py +++ b/tensorflow/compiler/xla/python/xla_client_test.py @@ -1942,7 +1942,7 @@ def TestFactory(xla_backend, cloud_tpu=False): del buffer # Free "buffer" to make sure dlt retains ownership. 
self.assertEqual(type(dlt).__name__, "PyCapsule") y = xla_client._xla.dlpack_managed_tensor_to_buffer( - dlt, self.backend.client) + dlt, self.backend) np.testing.assert_array_equal(x, y.to_py()) def testTensorsCanBeConsumedOnceOnly(self): @@ -1952,7 +1952,7 @@ def TestFactory(xla_backend, cloud_tpu=False): def ConsumeDLPackTensor(): _ = xla_client._xla.dlpack_managed_tensor_to_buffer( - dlt, self.backend.client) + dlt, self.backend) ConsumeDLPackTensor() self.assertRaisesRegex( diff --git a/tensorflow/compiler/xla/service/hlo_pass_fix.h b/tensorflow/compiler/xla/service/hlo_pass_fix.h index 93cb4e71d6d..a22a394c6a4 100644 --- a/tensorflow/compiler/xla/service/hlo_pass_fix.h +++ b/tensorflow/compiler/xla/service/hlo_pass_fix.h @@ -68,8 +68,8 @@ class HloPassFix : public Pass { VLOG(3) << "changed_this_iteration: " << changed_this_iteration; ++iteration_count; if (iteration_count == kLimit) { - LOG(WARNING) << "Unexpectedly high number of iterations in HLO passes, " - "exiting fixed point loop."; + VLOG(1) << "Unexpectedly high number of iterations in HLO passes, " + "exiting fixed point loop."; // Return false in case this is fixed point is nested. return false; } diff --git a/tensorflow/compiler/xla/service/memory_space_assignment.cc b/tensorflow/compiler/xla/service/memory_space_assignment.cc index fe143d7e86e..d5a118c00dc 100644 --- a/tensorflow/compiler/xla/service/memory_space_assignment.cc +++ b/tensorflow/compiler/xla/service/memory_space_assignment.cc @@ -22,6 +22,76 @@ namespace { // Define a dummy chunk for chunks that will be allocated in the default memory // space and for keeping track of number of asynchronous copies. const HeapSimulator::Chunk kDummyChunk{-1, -1}; + +// Returns a heuristic value that captures how much putting this tensor to +// the alternate memory would help if the op is memory bound, or otherwise +// how far off is the op to memory boundedness. 
The larger this number, the +// higher priority it will be placed in the alternate memory. +float GetAlternateMemoryBenefit( + const MemorySpaceAssignmentCostAnalysis& cost_analysis, + const HloInstruction& instruction, + float elapsed_time_due_to_alternate_mem) { + float elapsed_time_due_to_compute = + cost_analysis.GetInstructionElapsedDueToCompute(instruction); + float elapsed_time_due_to_memory = + cost_analysis.GetInstructionElapsedDueToMemory(instruction); + if (elapsed_time_due_to_memory > elapsed_time_due_to_compute) { + // Memory bound, return how much alternate memory is better. + return elapsed_time_due_to_memory - elapsed_time_due_to_alternate_mem; + } else { + // Compute bound, return how far off are we to memory boundedness. + return elapsed_time_due_to_memory - elapsed_time_due_to_compute; + } +} + +// Returns a heuristic value of memory boundedness for the given BufferInterval. +// The larger this number, the higher priority it will be placed in the +// alternate memory. +float GetMemoryBoundedness( + const MemorySpaceAssignmentCostAnalysis& cost_analysis, + const GlobalDecreasingSizeBestFitHeap::BufferInterval& interval) { + const HloInstruction& defining_instruction = + *interval.buffer->defining_instruction(); + float alternate_mem_benefit = + GetAlternateMemoryBenefit(cost_analysis, defining_instruction, + cost_analysis.GetInstructionElapsedDueToMemory( + defining_instruction, + /*operand_in_alternate_mem=*/{}, + /*output_in_alternate_mem=*/true)); + for (const HloUse& use : interval.buffer->uses()) { + float use_alternate_mem_benefit = GetAlternateMemoryBenefit( + cost_analysis, *use.instruction, + cost_analysis.GetInstructionElapsedDueToMemory(*use.instruction, + use.operand_number)); + // If the benefit is positive (memory bound), add it to this buffer's + // benefit. If the benefit is negative (compute bound), calculate the + // maximum. 
+ if (alternate_mem_benefit > 0 && use_alternate_mem_benefit > 0) { + alternate_mem_benefit += use_alternate_mem_benefit; + } else { + alternate_mem_benefit = + std::max(alternate_mem_benefit, use_alternate_mem_benefit); + } + } + + // Get performance slowdown in seconds of prefetching current BufferInterval + // causing to other BufferIntervals. + float alternate_mem_slowdown = + cost_analysis.GetInstructionElapsedDueToMemorySlowdown(interval.size); + + // Scale the slowdown based on the time of this buffer. We would want earlier + // buffers have lower slowdown values, because they are less likely to overlap + // with other HLOs. + // TODO(yuemmawang): We may want a piecewise function, where a lower slowdown + // for early HLOs, and full slowdown for mid-to-late HLOs. + // TODO(yuemmawang): Further in a smarter way, we want buffers overlapped with + // more HLOs have higher slowdown, and vice versa. + float scale = interval.start * 1.0 / cost_analysis.GetScheduleEndTime(); + alternate_mem_slowdown *= scale; + + return alternate_mem_benefit - alternate_mem_slowdown; +} + } // namespace float MemorySpaceAssignmentCostAnalysis::GetInstructionElapsedDueToCompute( @@ -255,6 +325,12 @@ std::string CostAnalysisPrefetchIntervalPicker::ToNoCopyDebugString( ", logical interval elapsed (s) = ", logical_interval_elapsed); } +absl::optional +CostAnalysisPrefetchIntervalPicker::BufferIntervalAlternateMemoryBenefit( + const GlobalDecreasingSizeBestFitHeap::BufferInterval& interval) const { + return GetMemoryBoundedness(cost_analysis_, interval); +} + std::string MemorySpaceAssignment::AllocationValue::ToString() const { std::string out = absl::StrCat("computation = ", computation()->name()); absl::StrAppend(&out, "\n position:\n"); @@ -495,6 +571,86 @@ bool AlternateMemoryBestFitHeap::IsUseAllowedInAlternateMemory( return true; } +void AlternateMemoryBestFitHeap::AppendBufferInfoDebugString( + const GlobalDecreasingSizeBestFitHeap::BufferInterval& interval, + std::string* 
debug_str) const { + // Columns in buffer information: + // buffer_id: int. This value can be used to match the allocation in + // allocation information. + // buffer_name: string. + // alt_mem_benefit: float. Roughly corresponds to how much the cost analysis + // thought it would be beneficial to put this in the alternate memory. The + // higher the value, the more it is memory bound. + // size: int. In bytes. + // definition_time: int. Logical time this value was defined in the schedule. + // use_times: string. This is a semicolon-separated list of integers for all + // the use times. + if (debug_str->empty()) { + // Append the column names. + absl::StrAppend(debug_str, + "buffer_id,buffer_name,alt_mem_benefit,size,definition_" + "time,use_times\n"); + } + const HloBuffer& buffer = + alias_analysis_.GetBufferContainingValue(*interval.buffer); + const auto& instruction_schedule = hlo_live_range_.instruction_schedule(); + int64 definition_time = + instruction_schedule.at(interval.buffer->defining_position().instruction); + std::set use_times; + for (const HloValue* value : buffer.values()) { + for (const HloUse& use : value->uses()) { + use_times.insert(instruction_schedule.at(use.instruction)); + } + } + + absl::StrAppend(debug_str, buffer.id(), ","); + absl::StrAppend(debug_str, "\"", interval.buffer->ToShortString(), "\","); + auto alternate_memory_benefit = + options_.prefetch_interval_picker->BufferIntervalAlternateMemoryBenefit( + interval); + absl::StrAppend( + debug_str, alternate_memory_benefit ? 
*alternate_memory_benefit : 0, ","); + absl::StrAppend(debug_str, interval.size, ","); + absl::StrAppend(debug_str, definition_time, ","); + absl::StrAppend(debug_str, "\"", absl::StrJoin(use_times, ";"), "\""); + absl::StrAppend(debug_str, "\n"); +} + +void AlternateMemoryBestFitHeap::AppendAllocationInfoDebugString( + const GlobalDecreasingSizeBestFitHeap::BufferInterval& interval, + const MemorySpaceAssignment::Allocation& allocation, + std::string* debug_str) const { + // Columns in allocation information: + // buffer_id: int. This value can be used the match with buffer info. + // size: int. In bytes. + // offset: int. In bytes. + // start_time: int. Logical start time of the allocation. + // end_time: int. Logical end time of the allocation. + if (debug_str->empty()) { + // Append the column names. + absl::StrAppend(debug_str, "buffer_id,size,offset,start_time,end_time\n"); + } + if (allocation.memory_space() == MemorySpace::kAlternate) { + const HloBuffer& buffer = + alias_analysis_.GetBufferContainingValue(*interval.buffer); + absl::StrAppend(debug_str, buffer.id(), ","); + absl::StrAppend(debug_str, interval.size, ","); + absl::StrAppend(debug_str, allocation.chunk().offset, ","); + absl::StrAppend(debug_str, allocation.start_time(), ","); + absl::StrAppend(debug_str, allocation.end_time(), "\n"); + } +} + +void AlternateMemoryBestFitHeap::DumpIfEnabled( + absl::string_view buffer_info_str, + absl::string_view allocation_info_str) const { + if (!options_.dump_fn) { + return; + } + options_.dump_fn("bufferinfo", buffer_info_str); + options_.dump_fn("allocinfo", allocation_info_str); +} + HeapSimulator::Result AlternateMemoryBestFitHeap::Finish() { std::vector sorted_buffer_intervals = GetSortedBufferIntervals(); @@ -514,6 +670,9 @@ HeapSimulator::Result AlternateMemoryBestFitHeap::Finish() { } } + std::string buffer_info_str; + std::string allocation_info_str; + for (auto& interval : sorted_buffer_intervals) { if (!interval.need_allocation) { continue; @@ 
-616,6 +775,8 @@ HeapSimulator::Result AlternateMemoryBestFitHeap::Finish() { } } + AppendBufferInfoDebugString(interval, &buffer_info_str); + // Data structure to contain the preferred offset for a given computation. // We ensure that the same offset will be allocated outside the while loop // as well as inside the while loop. @@ -743,6 +904,8 @@ HeapSimulator::Result AlternateMemoryBestFitHeap::Finish() { if (allocation_success) { for (AllocationValue& allocation_value : allocation_values) { for (auto& allocation : *allocation_value.allocation_sequence()) { + AppendAllocationInfoDebugString(interval, *allocation, + &allocation_info_str); allocations_->push_back(std::move(allocation)); } } @@ -752,6 +915,12 @@ HeapSimulator::Result AlternateMemoryBestFitHeap::Finish() { pending_async_copies_.clear(); } + VLOG(3) << "Debug buffer info: "; + VLOG(3) << buffer_info_str; + VLOG(3) << "Debug allocation info: "; + VLOG(3) << allocation_info_str; + DumpIfEnabled(buffer_info_str, allocation_info_str); + return result_; } @@ -1544,70 +1713,8 @@ AlternateMemoryBestFitHeap::FindBestChunkCandidate( MemorySpaceAssignment::GetMemoryBoundednessBufferIntervalCompare( const MemorySpaceAssignmentCostAnalysis& cost_analysis) { return [&](const BufferInterval& x, const BufferInterval& y) { - // Returns a heuristic value that captures how much putting this tensor to - // the alternate memory would help if the op is memory bound, or otherwise - // how far off is the op to memory boundedness. The larger this number, the - // higher priority it will be placed in the alternate memory. 
- auto get_alternate_mem_benefit = - [&](const HloInstruction& instruction, - float elapsed_time_due_to_alternate_mem) { - float elapsed_time_due_to_compute = - cost_analysis.GetInstructionElapsedDueToCompute(instruction); - float elapsed_time_due_to_memory = - cost_analysis.GetInstructionElapsedDueToMemory(instruction); - if (elapsed_time_due_to_memory > elapsed_time_due_to_compute) { - // Memory bound, return how much alternate memory is better. - return elapsed_time_due_to_memory - - elapsed_time_due_to_alternate_mem; - } else { - // Compute bound, return how far off are we to memory boundedness. - return elapsed_time_due_to_memory - elapsed_time_due_to_compute; - } - }; - - auto get_memory_boundedness = [&](const BufferInterval& interval) { - const HloInstruction& defining_instruction = - *interval.buffer->defining_instruction(); - float alternate_mem_benefit = get_alternate_mem_benefit( - defining_instruction, cost_analysis.GetInstructionElapsedDueToMemory( - defining_instruction, - /*operand_in_alternate_mem=*/{}, - /*output_in_alternate_mem=*/true)); - for (const HloUse& use : interval.buffer->uses()) { - float use_alternate_mem_benefit = get_alternate_mem_benefit( - *use.instruction, cost_analysis.GetInstructionElapsedDueToMemory( - *use.instruction, use.operand_number)); - // If the benefit is positive (memory bound), add it to this buffer's - // benefit. If the benefit is negative (compute bound), calculate the - // maximum. - if (alternate_mem_benefit > 0 && use_alternate_mem_benefit > 0) { - alternate_mem_benefit += use_alternate_mem_benefit; - } else { - alternate_mem_benefit = - std::max(alternate_mem_benefit, use_alternate_mem_benefit); - } - } - - // Get performance slowdown in seconds of prefetching current - // BufferInterval causing to other BufferIntervals. - float alternate_mem_slowdown = - cost_analysis.GetInstructionElapsedDueToMemorySlowdown(interval.size); - - // Scale the slowdown based on the time of this buffer. 
We would want - // earlier buffers have lower slowdown values, because they are less - // likely to overlap with other HLOs. - // TODO (yuemmawang) We may want a piecewise function, where a lower - // slowdown for early HLOs, and full slowdown for mid-to-late HLOs. - // TODO (yuemmawang) Further in a smarter way, we want buffers overlapped - // with more HLOs have higher slowdown, and vice versa. - float scale = interval.start * 1.0 / cost_analysis.GetScheduleEndTime(); - alternate_mem_slowdown *= scale; - - return alternate_mem_benefit - alternate_mem_slowdown; - }; - - float x_memory_boundedness = get_memory_boundedness(x); - float y_memory_boundedness = get_memory_boundedness(y); + float x_memory_boundedness = GetMemoryBoundedness(cost_analysis, x); + float y_memory_boundedness = GetMemoryBoundedness(cost_analysis, y); if (x_memory_boundedness != y_memory_boundedness) { return x_memory_boundedness > y_memory_boundedness; } diff --git a/tensorflow/compiler/xla/service/memory_space_assignment.h b/tensorflow/compiler/xla/service/memory_space_assignment.h index 5572aca5581..ab4bc5bf106 100644 --- a/tensorflow/compiler/xla/service/memory_space_assignment.h +++ b/tensorflow/compiler/xla/service/memory_space_assignment.h @@ -63,9 +63,15 @@ class PresetAssignments { return assignment_info_; } + // Get debugging information. + std::string buffer_info_str() const { return buffer_info_str_; } + std::string allocation_info_str() const { return allocation_info_str_; } + private: std::vector> chunks_; std::vector> assignment_info_; + std::string buffer_info_str_; + std::string allocation_info_str_; }; // A wrapper class around HloCostAnalysis with additional knowledge about the @@ -165,6 +171,14 @@ class PrefetchIntervalPicker { virtual std::string ToNoCopyDebugString(const Shape& shape, int64 start_time, int64 end_time) const = 0; + // Prefetch interval pickers may return a value corresponding to the benefit + // of placing the BufferInterval in the alternate memory. 
The larger the value, + the more beneficial. + virtual absl::optional BufferIntervalAlternateMemoryBenefit( + const GlobalDecreasingSizeBestFitHeap::BufferInterval& interval) const { + return absl::nullopt; + } + protected: const absl::flat_hash_map* instruction_schedule_ = nullptr; @@ -239,6 +253,10 @@ class CostAnalysisPrefetchIntervalPicker : public PrefetchIntervalPicker { std::string ToNoCopyDebugString(const Shape& shape, int64 start_time, int64 end_time) const override; + absl::optional BufferIntervalAlternateMemoryBenefit( + const GlobalDecreasingSizeBestFitHeap::BufferInterval& interval) + const override; + private: // Returns the elapsed time in seconds between the logical interval that // corresponds to the instruction schedule. @@ -317,6 +335,11 @@ class MemorySpaceAssignment { // buffers. bool verify = false; + // If not nullptr, this function is called to dump debugging information. + // The first argument is appended to the file name and the second argument + // is the contents of the file. + std::function dump_fn = nullptr; + // Enable prefetching buffers into preferred memory across program // boundaries bool enable_cross_program_prefetch = true; @@ -899,6 +922,17 @@ class AlternateMemoryBestFitHeap : public GlobalDecreasingSizeBestFitHeap { // buffers from the interval trees. void UncommitPendingChunks(); + // Append buffer and allocation infos for debugging and dump it into a file, + // if enabled. + void AppendBufferInfoDebugString(const BufferInterval& interval, + std::string* debug_str) const; + void AppendAllocationInfoDebugString( + const BufferInterval& interval, + const MemorySpaceAssignment::Allocation& allocation, + std::string* debug_str) const; + void DumpIfEnabled(absl::string_view buffer_info_str, + absl::string_view allocation_info_str) const; + // Returns the available heap size in the alternate memory. 
int64 available_heap_size() const { return options_.max_size_in_bytes - reserved_in_bytes_; diff --git a/tensorflow/compiler/xla/service/rng_bit_generator_expander.cc b/tensorflow/compiler/xla/service/rng_bit_generator_expander.cc index 61f69d2712b..52901df5bf1 100644 --- a/tensorflow/compiler/xla/service/rng_bit_generator_expander.cc +++ b/tensorflow/compiler/xla/service/rng_bit_generator_expander.cc @@ -32,15 +32,20 @@ limitations under the License. namespace xla { namespace { -StatusOr GetPhiloxStateOp(XlaOp input_state) { - TF_ASSIGN_OR_RETURN(const Shape* shape, - input_state.builder()->GetShapePtr(input_state)); - if (shape->dimensions(0) >= 3) { +XlaOp GetPhiloxStateOp(XlaOp input_state, const Shape& state_shape) { + if (state_shape.dimensions(0) >= 3) { return Slice(input_state, {1}, {3}, {1}); } return Rev(input_state, {0}); } +XlaOp GetPhiloxOutputStateOp(XlaOp output_state, const Shape& state_shape) { + if (state_shape.dimensions(0) < 3) { + output_state = Slice(output_state, {0}, {1}, {1}); + } + return output_state; +} + } // namespace bool RngBitGeneratorExpander::InstructionMatchesPattern( @@ -60,25 +65,22 @@ StatusOr RngBitGeneratorExpander::GetGeneratorComputation( XlaBuilder builder("rng"); XlaOp state_param = Parameter(&builder, 0, state_shape, "state"); XlaOp key_op = Reshape(Slice(state_param, {0}, {1}, {1}), {}); - XlaOp state_op; - - BitGeneratorTy generator = nullptr; + RngOutput output; switch (algorithm) { case RandomAlgorithm::RNG_THREE_FRY: - generator = ThreeFryBitGenerator; - state_op = Slice(state_param, {1}, {2}, {1}); + output = ThreeFryBitGenerator(key_op, Slice(state_param, {1}, {2}, {1}), + data_shape); break; - case RandomAlgorithm::RNG_PHILOX: { - generator = PhiloxBitGenerator; - TF_ASSIGN_OR_RETURN(state_op, GetPhiloxStateOp(state_param)); + case RandomAlgorithm::RNG_PHILOX: + output = PhiloxBitGenerator( + key_op, GetPhiloxStateOp(state_param, state_shape), data_shape); + output.state = GetPhiloxOutputStateOp(output.state, 
state_shape); break; - } default: return Unimplemented("Unsupported random algorthm: %s", RandomAlgorithm_Name(algorithm)); } - RngOutput output = generator(key_op, state_op, data_shape); XlaOp final_state = ConcatInDim(&builder, {Reshape(key_op, {1}), output.state}, 0); Tuple(&builder, {final_state, output.value}); diff --git a/tensorflow/core/api_def/base_api/api_def_MlirPassthroughOp.pbtxt b/tensorflow/core/api_def/base_api/api_def_MlirPassthroughOp.pbtxt index 42b80a60725..1486759f23f 100644 --- a/tensorflow/core/api_def/base_api/api_def_MlirPassthroughOp.pbtxt +++ b/tensorflow/core/api_def/base_api/api_def_MlirPassthroughOp.pbtxt @@ -27,7 +27,7 @@ func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> @tf.function def foo(x, y): - return = mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) + return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def() ``` diff --git a/tensorflow/core/api_def/base_api/api_def_ShuffleAndRepeatDatasetV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_ShuffleAndRepeatDatasetV2.pbtxt new file mode 100644 index 00000000000..93135231b87 --- /dev/null +++ b/tensorflow/core/api_def/base_api/api_def_ShuffleAndRepeatDatasetV2.pbtxt @@ -0,0 +1,4 @@ +op { + graph_op_name: "ShuffleAndRepeatDatasetV2" + visibility: HIDDEN +} diff --git a/tensorflow/core/common_runtime/function.cc b/tensorflow/core/common_runtime/function.cc index 0df10490ef1..08619371833 100644 --- a/tensorflow/core/common_runtime/function.cc +++ b/tensorflow/core/common_runtime/function.cc @@ -165,6 +165,12 @@ class FunctionLibraryRuntimeOverlay : public FunctionLibraryRuntime { void Run(const Options& opts, Handle handle, CallFrameInterface* call_frame, DoneCallback done) override; + Status RunSync(Options opts, Handle handle, gtl::ArraySlice args, + std::vector* rets) override; + + Status 
RunSync(Options opts, Handle handle, + CallFrameInterface* frame) override; + Status CreateKernel(const std::shared_ptr& props, OpKernel** kernel) override; @@ -235,6 +241,17 @@ void FunctionLibraryRuntimeOverlay::Run(const Options& opts, Handle handle, base_flr_->Run(opts, handle, call_frame, std::move(done)); } +Status FunctionLibraryRuntimeOverlay::RunSync(Options opts, Handle handle, + gtl::ArraySlice args, + std::vector* rets) { + return base_flr_->RunSync(std::move(opts), handle, args, rets); +} + +Status FunctionLibraryRuntimeOverlay::RunSync(Options opts, Handle handle, + CallFrameInterface* call_frame) { + return base_flr_->RunSync(std::move(opts), handle, call_frame); +} + Status FunctionLibraryRuntimeOverlay::CreateKernel( const std::shared_ptr&, OpKernel**) { // We don't have access to base_lib_def_ in base function library runtime (aka @@ -331,6 +348,10 @@ class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime { std::vector* rets, DoneCallback done) override; void Run(const Options& opts, Handle handle, CallFrameInterface* frame, DoneCallback done) override; + Status RunSync(Options opts, Handle handle, gtl::ArraySlice args, + std::vector* rets) override; + Status RunSync(Options opts, Handle handle, + CallFrameInterface* call_frame) override; bool IsStateful(const string& function) const override; @@ -424,6 +445,10 @@ class FunctionLibraryRuntimeImpl : public FunctionLibraryRuntime { gtl::ArraySlice args, std::vector* rets, Item* item, DoneCallback done); + Status PrepareRunSync( + Handle handle, Options* run_opts, Item** out_item, + std::unique_ptr* out_rendezvous); + void ExecutorArgsFromOptions(const FunctionLibraryRuntime::Options& run_opts, CallFrameInterface* frame, Executor::Args* exec_args); @@ -1187,6 +1212,79 @@ void FunctionLibraryRuntimeImpl::Run(const Options& opts, Handle handle, item->exec->RunAsync(exec_args, std::move(done)); } +Status FunctionLibraryRuntimeImpl::PrepareRunSync( + Handle handle, Options* run_opts, Item** 
out_item, + std::unique_ptr* out_rendezvous) { + if (run_opts->cancellation_manager && + run_opts->cancellation_manager->IsCancelled()) { + return errors::Cancelled(""); + } + + if (run_opts->remote_execution) { + // NOTE(mrry): This bit is only set for a local function when `parent_` + // calls back into this class, and the current implementation of + // `ProcessFunctionLibraryRuntime` currently always uses the asynchronous + // Run() method. + return errors::Unimplemented("Remote calling with RunSync()"); + } + + if (run_opts->create_rendezvous) { + *out_rendezvous = + absl::make_unique(device_mgr_); + run_opts->rendezvous = out_rendezvous->get(); + run_opts->create_rendezvous = false; + } + + LocalHandle local_handle = parent_->GetHandleOnDevice(device_name_, handle); + if (local_handle == kInvalidLocalHandle) { + *out_item = nullptr; + return Status::OK(); + } + + TF_RETURN_IF_ERROR(GetOrCreateItem(local_handle, out_item)); + + if (run_opts->runner == nullptr) { + run_opts->runner = &default_runner_; + } + DCHECK(run_opts->runner != nullptr); + + return Status::OK(); +} + +Status FunctionLibraryRuntimeImpl::RunSync(Options opts, Handle handle, + gtl::ArraySlice args, + std::vector* rets) { + Item* item = nullptr; + std::unique_ptr rendezvous; + TF_RETURN_IF_ERROR(PrepareRunSync(handle, &opts, &item, &rendezvous)); + if (item == nullptr) { + return parent_->RunSync(opts, handle, args, rets); + } + + Executor::Args exec_args; + const FunctionBody* fbody = GetFunctionBody(handle); + FunctionCallFrame frame(fbody->arg_types, fbody->ret_types); + TF_RETURN_IF_ERROR(frame.SetArgs(args)); + ExecutorArgsFromOptions(opts, &frame, &exec_args); + + TF_RETURN_IF_ERROR(item->exec->Run(exec_args)); + return frame.ConsumeRetvals(rets, opts.allow_dead_tensors); +} + +Status FunctionLibraryRuntimeImpl::RunSync(Options opts, Handle handle, + CallFrameInterface* call_frame) { + Item* item = nullptr; + std::unique_ptr rendezvous; + TF_RETURN_IF_ERROR(PrepareRunSync(handle, &opts, 
&item, &rendezvous)); + if (item == nullptr) { + return parent_->RunSync(opts, handle, call_frame); + } + + Executor::Args exec_args; + ExecutorArgsFromOptions(opts, call_frame, &exec_args); + return item->exec->Run(exec_args); +} + bool FunctionLibraryRuntimeImpl::IsStateful(const string& func) const { const OpDef* op_def; const Status s = base_lib_def_->LookUpOpDef(func, &op_def); diff --git a/tensorflow/core/common_runtime/process_function_library_runtime.cc b/tensorflow/core/common_runtime/process_function_library_runtime.cc index 42bde655735..c447832c91b 100644 --- a/tensorflow/core/common_runtime/process_function_library_runtime.cc +++ b/tensorflow/core/common_runtime/process_function_library_runtime.cc @@ -1488,6 +1488,33 @@ void ProcessFunctionLibraryRuntime::Run( }); } +Status ProcessFunctionLibraryRuntime::RunSync( + const FunctionLibraryRuntime::Options& opts, + FunctionLibraryRuntime::Handle handle, gtl::ArraySlice args, + std::vector* rets) const { + Notification n; + Status s; + Run(opts, handle, args, rets, [&n, &s](const Status& status) { + s.Update(status); + n.Notify(); + }); + n.WaitForNotification(); + return s; +} + +Status ProcessFunctionLibraryRuntime::RunSync( + const FunctionLibraryRuntime::Options& opts, + FunctionLibraryRuntime::Handle handle, CallFrameInterface* frame) const { + Notification n; + Status s; + Run(opts, handle, frame, [&n, &s](const Status& status) { + s.Update(status); + n.Notify(); + }); + n.WaitForNotification(); + return s; +} + void ProcessFunctionLibraryRuntime::Run( const FunctionLibraryRuntime::Options& opts, FunctionLibraryRuntime::Handle handle, const FunctionArgsInterface& args, diff --git a/tensorflow/core/common_runtime/process_function_library_runtime.h b/tensorflow/core/common_runtime/process_function_library_runtime.h index c71a6e647eb..104872e5a1c 100644 --- a/tensorflow/core/common_runtime/process_function_library_runtime.h +++ b/tensorflow/core/common_runtime/process_function_library_runtime.h @@ -194,6 
+194,13 @@ class ProcessFunctionLibraryRuntime { const FunctionArgsInterface& args, std::vector* rets, FunctionLibraryRuntime::DoneCallback done) const; + Status RunSync(const FunctionLibraryRuntime::Options& opts, + FunctionLibraryRuntime::Handle handle, + gtl::ArraySlice args, std::vector* rets) const; + Status RunSync(const FunctionLibraryRuntime::Options& opts, + FunctionLibraryRuntime::Handle handle, + CallFrameInterface* frame) const; + const DeviceMgr* device_mgr() { return device_mgr_; } const std::shared_ptr device_set() { diff --git a/tensorflow/core/data/service/BUILD b/tensorflow/core/data/service/BUILD index 4838ca165f9..4a973423519 100644 --- a/tensorflow/core/data/service/BUILD +++ b/tensorflow/core/data/service/BUILD @@ -290,12 +290,34 @@ cc_library( ], ) +cc_library( + name = "data_service", + srcs = ["data_service.cc"], + hdrs = [ + "data_service.h", + ], + deps = [ + ":credentials_factory", + ":grpc_util", + ":master_cc_grpc_proto", + ":master_proto_cc", + ":worker_cc_grpc_proto", + ":worker_proto_cc", + "//tensorflow/core:framework", + "//tensorflow/core:lib", + "//tensorflow/core:lib_internal", + "//tensorflow/core:protos_all_cc", + tf_grpc_cc_dependency(), + ], +) + tf_cc_test( name = "data_service_test", srcs = ["data_service_test.cc"], tags = ["no_windows"], deps = [ ":compression_utils", + ":data_service", ":grpc_master_impl", ":grpc_util", ":grpc_worker_impl", diff --git a/tensorflow/core/data/service/data_service.cc b/tensorflow/core/data/service/data_service.cc new file mode 100644 index 00000000000..f961683a775 --- /dev/null +++ b/tensorflow/core/data/service/data_service.cc @@ -0,0 +1,140 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/data/service/data_service.h" + +#include "grpcpp/create_channel.h" +#include "grpcpp/security/credentials.h" +#include "tensorflow/core/data/service/credentials_factory.h" +#include "tensorflow/core/data/service/grpc_util.h" +#include "tensorflow/core/data/service/master.grpc.pb.h" +#include "tensorflow/core/data/service/worker.grpc.pb.h" +#include "tensorflow/core/framework/dataset.h" + +namespace tensorflow { +namespace data { + +Status DataServiceMasterClient::CreateJob(int64 dataset_id, + ProcessingMode processing_mode, + int64* job_id) { + TF_RETURN_IF_ERROR(EnsureInitialized()); + CreateJobRequest req; + req.set_dataset_id(dataset_id); + req.set_processing_mode(ProcessingModeDef(processing_mode)); + CreateJobResponse resp; + grpc::ClientContext client_ctx; + grpc::Status status = stub_->CreateJob(&client_ctx, req, &resp); + if (!status.ok()) { + return grpc_util::WrapError( + absl::StrCat("Failed to create job for dataset with id ", dataset_id), + status); + } + *job_id = resp.job_id(); + return Status::OK(); +} + +Status DataServiceMasterClient::RegisterDataset(GraphDef dataset, + int64* dataset_id) { + TF_RETURN_IF_ERROR(EnsureInitialized()); + GetOrRegisterDatasetRequest req; + *req.mutable_dataset()->mutable_graph() = dataset; + GetOrRegisterDatasetResponse resp; + grpc::ClientContext client_ctx; + grpc::Status status = stub_->GetOrRegisterDataset(&client_ctx, req, &resp); + if (!status.ok()) { + return grpc_util::WrapError("Failed to register dataset", 
status); + } + *dataset_id = resp.dataset_id(); + return Status::OK(); +} + +Status DataServiceMasterClient::GetTasks(int64 job_id, + std::vector* tasks, + bool* job_finished) { + TF_RETURN_IF_ERROR(EnsureInitialized()); + GetTasksRequest req; + req.set_job_id(job_id); + GetTasksResponse resp; + grpc_impl::ClientContext ctx; + grpc::Status s = stub_->GetTasks(&ctx, req, &resp); + if (!s.ok()) { + return grpc_util::WrapError("Failed to get tasks", s); + } + tasks->clear(); + for (auto& task : resp.task_info()) { + tasks->push_back(task); + } + *job_finished = resp.job_finished(); + return Status::OK(); +} + +Status DataServiceMasterClient::EnsureInitialized() { + std::shared_ptr credentials; + TF_RETURN_IF_ERROR( + CredentialsFactory::CreateClientCredentials(protocol_, &credentials)); + auto channel = grpc::CreateChannel(address_, credentials); + stub_ = MasterService::NewStub(channel); + return Status::OK(); +} + +Status DataServiceWorkerClient::GetElement(int64 task_id, + CompressedElement* element, + bool* end_of_sequence) { + TF_RETURN_IF_ERROR(EnsureInitialized()); + GetElementRequest req; + req.set_task_id(task_id); + GetElementResponse resp; + grpc_impl::ClientContext ctx; + grpc::Status s = stub_->GetElement(&ctx, req, &resp); + if (!s.ok()) { + return grpc_util::WrapError("Failed to get element", s); + } + *end_of_sequence = resp.end_of_sequence(); + if (!*end_of_sequence) { + *element = std::move(*resp.mutable_compressed_element()); + } + return Status::OK(); +} + +Status DataServiceWorkerClient::EnsureInitialized() { + std::shared_ptr credentials; + TF_RETURN_IF_ERROR( + CredentialsFactory::CreateClientCredentials(protocol_, &credentials)); + grpc::ChannelArguments args; + args.SetMaxReceiveMessageSize(-1); + auto channel = grpc::CreateCustomChannel(address_, credentials, args); + stub_ = WorkerService::NewStub(channel); + return Status::OK(); +} + +Status CreateDataServiceMasterClient( + absl::string_view address, absl::string_view protocol, + 
std::unique_ptr* out) { + auto client = absl::make_unique(address, protocol); + TF_RETURN_IF_ERROR(client->Initialize()); + *out = std::move(client); + return Status::OK(); +} + +Status CreateDataServiceWorkerClient( + absl::string_view address, absl::string_view protocol, + std::unique_ptr* out) { + auto client = absl::make_unique(address, protocol); + TF_RETURN_IF_ERROR(client->Initialize()); + *out = std::move(client); + return Status::OK(); +} +} // namespace data +} // namespace tensorflow diff --git a/tensorflow/core/data/service/data_service.h b/tensorflow/core/data/service/data_service.h new file mode 100644 index 00000000000..c54c0c33390 --- /dev/null +++ b/tensorflow/core/data/service/data_service.h @@ -0,0 +1,123 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_CORE_DATA_SERVICE_DATA_SERVICE_H_ +#define TENSORFLOW_CORE_DATA_SERVICE_DATA_SERVICE_H_ + +#include "tensorflow/core/data/service/master.grpc.pb.h" +#include "tensorflow/core/data/service/worker.grpc.pb.h" +#include "tensorflow/core/framework/dataset.h" +#include "tensorflow/core/framework/op_kernel.h" + +namespace tensorflow { +namespace data { + +// Modes for how a tf.data service job should process a dataset. +enum class ProcessingMode : int64 { + // Each tf.data worker processes an entire epoch. 
If a dataset contains 2 + // elements and there are 3 workers, the job will produce 6 elements. + PARALLEL_EPOCHS = 0, + // Processing of a single epoch is distributed across all tf.data workers. + ONE_EPOCH = 1, +}; + +// Base class for data service clients. Data service clients are +// thread-compatible, requiring external synchronization when used from multiple +// threads. +class DataServiceClientBase { + public: + DataServiceClientBase(absl::string_view address, absl::string_view protocol) + : address_(address), protocol_(protocol) {} + + virtual ~DataServiceClientBase() = default; + // Not copyable or movable. + DataServiceClientBase(const DataServiceClientBase&) = delete; + DataServiceClientBase& operator=(const DataServiceClientBase&) = delete; + + // Initializes the client. Calling `Initialize()` is not required since the + // first RPC will perform any necessary initialization. However, it can be + // useful to call `Initialize()` proactively so that any errors that happen + // during initialization can be surfaced earlier. + Status Initialize() { return EnsureInitialized(); } + + protected: + // Initializes the client if it isn't already initialized. + virtual Status EnsureInitialized() = 0; + + const std::string address_; + const std::string protocol_; +}; + +// Client for communicating with the tf.data service master. +class DataServiceMasterClient : public DataServiceClientBase { + public: + DataServiceMasterClient(absl::string_view address, absl::string_view protocol) + : DataServiceClientBase(address, protocol) {} + + // Registers a dataset with the tf.data service, and stores the generated + // dataset id in `*dataset_id`. + Status RegisterDataset(GraphDef dataset, int64* dataset_id); + + // Creates a new tf.data service job for the specified dataset. The id for the + // created job will be stored in `*job_id`. 
+ Status CreateJob(int64 dataset_id, ProcessingMode processing_mode, + int64* job_id); + + // Queries the master for the tasks associated with the specified job. + // The tasks will be stored in *tasks, and whether the job is finished will + // be stored in `*job_finished`. + Status GetTasks(int64 job_id, std::vector* tasks, + bool* job_finished); + + protected: + Status EnsureInitialized() override; + + private: + std::unique_ptr stub_; +}; + +// Client for communicating with the tf.data service worker. +class DataServiceWorkerClient : public DataServiceClientBase { + public: + DataServiceWorkerClient(absl::string_view address, absl::string_view protocol) + : DataServiceClientBase(address, protocol) {} + + // Fetches the next element for the specified task_id. The element's + // compressed tensors will be stored in *element. If no element is available, + // `*end_of_sequence` will be `true`, and `element` will be left unchanged. + Status GetElement(int64 task_id, CompressedElement* element, + bool* end_of_sequence); + + protected: + Status EnsureInitialized() override; + + private: + std::unique_ptr stub_; +}; + +// Creates and initializes a new tf.data service master client. +Status CreateDataServiceMasterClient( + absl::string_view address, absl::string_view protocol, + std::unique_ptr* out); + +// Creates and initializes a new tf.data service worker client. +Status CreateDataServiceWorkerClient( + absl::string_view address, absl::string_view protocol, + std::unique_ptr* out); + +} // namespace data +} // namespace tensorflow + +#endif // TENSORFLOW_CORE_DATA_SERVICE_DATA_SERVICE_H_ diff --git a/tensorflow/core/data/service/data_service_test.cc b/tensorflow/core/data/service/data_service_test.cc index 77dad4522f5..f4c3c0e13e7 100644 --- a/tensorflow/core/data/service/data_service_test.cc +++ b/tensorflow/core/data/service/data_service_test.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ +#include "tensorflow/core/data/service/data_service.h" + #include "grpcpp/create_channel.h" #include "grpcpp/security/credentials.h" #include "absl/strings/str_split.h" @@ -34,97 +36,33 @@ namespace tensorflow { namespace data { namespace { -Status RegisterDataset(MasterService::Stub* master_stub, - const GraphDef& dataset_graph, int64* dataset_id) { - grpc_impl::ClientContext ctx; - GetOrRegisterDatasetRequest req; - *req.mutable_dataset()->mutable_graph() = dataset_graph; - GetOrRegisterDatasetResponse resp; - grpc::Status s = master_stub->GetOrRegisterDataset(&ctx, req, &resp); - if (!s.ok()) { - return grpc_util::WrapError("Failed to register dataset", s); - } - *dataset_id = resp.dataset_id(); - return Status::OK(); -} - -Status CreateJob(MasterService::Stub* master_stub, int64 dataset_id, - int64* job_id) { - grpc_impl::ClientContext ctx; - CreateJobRequest req; - req.set_dataset_id(dataset_id); - CreateJobResponse resp; - grpc::Status s = master_stub->CreateJob(&ctx, req, &resp); - if (!s.ok()) { - return grpc_util::WrapError("Failed to begin epoch", s); - } - *job_id = resp.job_id(); - return Status::OK(); -} - -Status GetTasks(MasterService::Stub* master_stub, int64 job_id, - std::vector* tasks) { - grpc_impl::ClientContext ctx; - GetTasksRequest req; - req.set_job_id(job_id); - GetTasksResponse resp; - grpc::Status s = master_stub->GetTasks(&ctx, req, &resp); - if (!s.ok()) { - return grpc_util::WrapError("Failed to get tasks", s); - } - tasks->clear(); - for (auto& task : resp.task_info()) { - tasks->push_back(task); - } - return Status::OK(); -} - -Status GetElement(WorkerService::Stub* worker_stub, int64 task_id, - std::vector* element, bool* end_of_sequence) { - grpc_impl::ClientContext ctx; - GetElementRequest req; - req.set_task_id(task_id); - GetElementResponse resp; - grpc::Status s = worker_stub->GetElement(&ctx, req, &resp); - if (!s.ok()) { - return 
grpc_util::WrapError("Failed to get element", s); - } - *end_of_sequence = resp.end_of_sequence(); - if (!*end_of_sequence) { - const CompressedElement& compressed = resp.compressed_element(); - TF_RETURN_IF_ERROR(service_util::Uncompress(compressed, element)); - } - return Status::OK(); -} +constexpr const char kProtocol[] = "grpc+local"; Status CheckWorkerOutput(const std::string& worker_address, int64 task_id, std::vector> expected_output) { - auto worker_channel = grpc::CreateChannel( - worker_address, grpc::experimental::LocalCredentials(LOCAL_TCP)); - std::unique_ptr worker_stub = - WorkerService::NewStub(worker_channel); + DataServiceWorkerClient worker(worker_address, kProtocol); for (std::vector& expected : expected_output) { bool end_of_sequence; - std::vector element; + CompressedElement compressed; TF_RETURN_IF_ERROR( - GetElement(worker_stub.get(), task_id, &element, &end_of_sequence)); + worker.GetElement(task_id, &compressed, &end_of_sequence)); if (end_of_sequence) { return errors::Internal("Reached end of sequence too early."); } + std::vector element; + TF_RETURN_IF_ERROR(service_util::Uncompress(compressed, &element)); TF_RETURN_IF_ERROR(DatasetOpsTestBase::ExpectEqual(element, expected, /*compare_order=*/true)); } // Call GetElement a couple more times to verify tha end_of_sequence keeps // returning true. 
bool end_of_sequence; - std::vector element; - TF_RETURN_IF_ERROR( - GetElement(worker_stub.get(), task_id, &element, &end_of_sequence)); + CompressedElement compressed; + TF_RETURN_IF_ERROR(worker.GetElement(task_id, &compressed, &end_of_sequence)); if (!end_of_sequence) { return errors::Internal("Expected end_of_sequence to be true"); } - TF_RETURN_IF_ERROR( - GetElement(worker_stub.get(), task_id, &element, &end_of_sequence)); + TF_RETURN_IF_ERROR(worker.GetElement(task_id, &compressed, &end_of_sequence)); if (!end_of_sequence) { return errors::Internal("Expected end_of_sequence to be true"); } @@ -138,22 +76,21 @@ TEST(DataService, IterateDatasetOneWorker) { TF_ASSERT_OK(cluster.Initialize()); test_util::GraphDefTestCase test_case; TF_ASSERT_OK(test_util::map_test_case(&test_case)); - auto master_channel = grpc::CreateChannel( - cluster.MasterAddress(), grpc::experimental::LocalCredentials(LOCAL_TCP)); - std::unique_ptr master_stub = - MasterService::NewStub(master_channel); + DataServiceMasterClient master(cluster.MasterAddress(), kProtocol); int64 dataset_id; - TF_ASSERT_OK( - RegisterDataset(master_stub.get(), test_case.graph_def, &dataset_id)); + TF_ASSERT_OK(master.RegisterDataset(test_case.graph_def, &dataset_id)); int64 job_id; - TF_ASSERT_OK(CreateJob(master_stub.get(), dataset_id, &job_id)); + TF_ASSERT_OK( + master.CreateJob(dataset_id, ProcessingMode::PARALLEL_EPOCHS, &job_id)); std::vector tasks; - TF_ASSERT_OK(GetTasks(master_stub.get(), job_id, &tasks)); + bool job_finished; + TF_ASSERT_OK(master.GetTasks(job_id, &tasks, &job_finished)); ASSERT_EQ(tasks.size(), 1); - ASSERT_EQ(tasks[0].worker_address(), cluster.WorkerAddress(0)); + EXPECT_EQ(tasks[0].worker_address(), cluster.WorkerAddress(0)); + EXPECT_FALSE(job_finished); - TF_ASSERT_OK(CheckWorkerOutput(tasks[0].worker_address(), tasks[0].id(), + TF_EXPECT_OK(CheckWorkerOutput(tasks[0].worker_address(), tasks[0].id(), test_case.output)); } @@ -162,23 +99,22 @@ TEST(DataService, 
IterateDatasetTwoWorkers) { TF_ASSERT_OK(cluster.Initialize()); test_util::GraphDefTestCase test_case; TF_ASSERT_OK(test_util::map_test_case(&test_case)); - auto master_channel = grpc::CreateChannel( - cluster.MasterAddress(), grpc::experimental::LocalCredentials(LOCAL_TCP)); - std::unique_ptr master_stub = - MasterService::NewStub(master_channel); + DataServiceMasterClient master(cluster.MasterAddress(), kProtocol); int64 dataset_id; - TF_ASSERT_OK( - RegisterDataset(master_stub.get(), test_case.graph_def, &dataset_id)); + TF_ASSERT_OK(master.RegisterDataset(test_case.graph_def, &dataset_id)); int64 job_id; - TF_ASSERT_OK(CreateJob(master_stub.get(), dataset_id, &job_id)); + TF_ASSERT_OK( + master.CreateJob(dataset_id, ProcessingMode::PARALLEL_EPOCHS, &job_id)); std::vector tasks; - TF_ASSERT_OK(GetTasks(master_stub.get(), job_id, &tasks)); - ASSERT_EQ(tasks.size(), 2); + bool job_finished; + TF_EXPECT_OK(master.GetTasks(job_id, &tasks, &job_finished)); + EXPECT_EQ(tasks.size(), 2); + EXPECT_FALSE(job_finished); // Each worker produces the full dataset. 
for (TaskInfo task : tasks) { - TF_ASSERT_OK( + TF_EXPECT_OK( CheckWorkerOutput(task.worker_address(), task.id(), test_case.output)); } } @@ -188,26 +124,26 @@ TEST(DataService, AddWorkerMidEpoch) { TF_ASSERT_OK(cluster.Initialize()); test_util::GraphDefTestCase test_case; TF_ASSERT_OK(test_util::map_test_case(&test_case)); - auto master_channel = grpc::CreateChannel( - cluster.MasterAddress(), grpc::experimental::LocalCredentials(LOCAL_TCP)); - std::unique_ptr master_stub = - MasterService::NewStub(master_channel); + DataServiceMasterClient master(cluster.MasterAddress(), kProtocol); int64 dataset_id; - TF_ASSERT_OK( - RegisterDataset(master_stub.get(), test_case.graph_def, &dataset_id)); + TF_ASSERT_OK(master.RegisterDataset(test_case.graph_def, &dataset_id)); int64 job_id; - TF_ASSERT_OK(CreateJob(master_stub.get(), dataset_id, &job_id)); + TF_ASSERT_OK( + master.CreateJob(dataset_id, ProcessingMode::PARALLEL_EPOCHS, &job_id)); std::vector tasks; - TF_ASSERT_OK(GetTasks(master_stub.get(), job_id, &tasks)); - ASSERT_EQ(tasks.size(), 1); + bool job_finished; + TF_ASSERT_OK(master.GetTasks(job_id, &tasks, &job_finished)); + EXPECT_EQ(tasks.size(), 1); + EXPECT_FALSE(job_finished); TF_ASSERT_OK(cluster.AddWorker()); - TF_ASSERT_OK(GetTasks(master_stub.get(), job_id, &tasks)); - ASSERT_EQ(tasks.size(), 2); + TF_EXPECT_OK(master.GetTasks(job_id, &tasks, &job_finished)); + EXPECT_EQ(tasks.size(), 2); + EXPECT_FALSE(job_finished); // Each worker produces the full dataset. 
for (TaskInfo task : tasks) { - TF_ASSERT_OK( + TF_EXPECT_OK( CheckWorkerOutput(task.worker_address(), task.id(), test_case.output)); } } diff --git a/tensorflow/core/framework/function.h b/tensorflow/core/framework/function.h index b6adf5b3190..b64047e999f 100644 --- a/tensorflow/core/framework/function.h +++ b/tensorflow/core/framework/function.h @@ -730,6 +730,12 @@ class FunctionLibraryRuntime { virtual void Run(const Options& opts, Handle handle, CallFrameInterface* call_frame, DoneCallback done) = 0; + virtual Status RunSync(Options opts, Handle handle, + gtl::ArraySlice args, + std::vector* rets) = 0; + virtual Status RunSync(Options opts, Handle handle, + CallFrameInterface* call_frame) = 0; + // Creates a "kernel" for the given NodeProperties "props". // // If succeeds, returns OK and the caller takes the ownership of the diff --git a/tensorflow/core/graph/BUILD b/tensorflow/core/graph/BUILD index 65e4175793b..ebaec566695 100644 --- a/tensorflow/core/graph/BUILD +++ b/tensorflow/core/graph/BUILD @@ -154,14 +154,6 @@ filegroup( ], ) -filegroup( - name = "mkl_related_tests", - srcs = [ - "mkl_layout_pass_test.cc", - "mkl_tfconversion_pass_test.cc", - ], -) - filegroup( name = "mobile_srcs_only_runtime", srcs = [ diff --git a/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc b/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc index 6f5c32edf26..64bb4528f62 100644 --- a/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc +++ b/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc @@ -26,12 +26,128 @@ limitations under the License. 
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h" #include "tensorflow/core/grappler/utils.h" #include "tensorflow/core/platform/protobuf.h" +#include "tensorflow/core/platform/strcat.h" namespace tensorflow { namespace grappler { namespace { -constexpr char kFusedOpName[] = "ShuffleAndRepeatDataset"; +constexpr char kShuffleDataset[] = "ShuffleDataset"; +constexpr char kShuffleDatasetV2[] = "ShuffleDatasetV2"; +constexpr char kShuffleDatasetV3[] = "ShuffleDatasetV3"; +constexpr char kRepeatDataset[] = "RepeatDataset"; +constexpr char kShuffleAndRepeatDataset[] = "ShuffleAndRepeatDataset"; +constexpr char kShuffleAndRepeatDatasetV2[] = "ShuffleAndRepeatDatasetV2"; + +constexpr char kOutputShapes[] = "output_shapes"; +constexpr char kOutputTypes[] = "output_types"; +constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration"; + +Status FuseShuffleV1AndRepeat(const NodeDef& shuffle_node, + const NodeDef& repeat_node, + MutableGraphView* graph, GraphDef* output, + NodeDef* fused_node) { + fused_node->set_op(kShuffleAndRepeatDataset); + graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDataset, output, + fused_node); + + // Set the `input` input argument. + fused_node->add_input(shuffle_node.input(0)); + + // Set the `buffer_size` input argument. + fused_node->add_input(shuffle_node.input(1)); + + // Set the `seed` input argument. + fused_node->add_input(shuffle_node.input(2)); + + // Set the `seed2` input argument. + fused_node->add_input(shuffle_node.input(3)); + + // Set the `count` input argument. + fused_node->add_input(repeat_node.input(1)); + + // Set `output_types`, `output_shapes`, and `reshuffle_each_iteration` + // attributes. 
+ for (auto key : {kOutputShapes, kOutputTypes, kReshuffleEachIteration}) { + graph_utils::CopyAttribute(key, shuffle_node, fused_node); + } + + return Status::OK(); +} + +Status FuseShuffleV2AndRepeat(const NodeDef& shuffle_node, + const NodeDef& repeat_node, + MutableGraphView* graph, GraphDef* output, + NodeDef* fused_node) { + fused_node->set_op(kShuffleAndRepeatDatasetV2); + graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDatasetV2, output, + fused_node); + + NodeDef zero_node = *graph_utils::AddScalarConstNode(0, graph); + + // Set the `input` input argument. + fused_node->add_input(shuffle_node.input(0)); + + // Set the `buffer_size` input argument. + fused_node->add_input(shuffle_node.input(1)); + + // Default the `seed` input argument to 0. + fused_node->add_input(zero_node.name()); + + // Default the `seed2` input argument to 0. + fused_node->add_input(zero_node.name()); + + // Set the `count` input argument. + fused_node->add_input(repeat_node.input(1)); + + // Set the `seed_generator` input argument. + fused_node->add_input(shuffle_node.input(2)); + + // Set `output_types` and `output_shapes` attributes. + for (auto key : {kOutputShapes, kOutputTypes}) { + graph_utils::CopyAttribute(key, shuffle_node, fused_node); + } + + // Default the `reshuffle_each_iteration` attribute to true. + (*fused_node->mutable_attr())[kReshuffleEachIteration].set_b(true); + + return Status::OK(); +} + +Status FuseShuffleV3AndRepeat(const NodeDef& shuffle_node, + const NodeDef& repeat_node, + MutableGraphView* graph, GraphDef* output, + NodeDef* fused_node) { + fused_node->set_op(kShuffleAndRepeatDatasetV2); + graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDataset, output, + fused_node); + + // Set the `input` input argument. + fused_node->add_input(shuffle_node.input(0)); + + // Set the `buffer_size` input argument. + fused_node->add_input(shuffle_node.input(1)); + + // Set the `seed` input argument. 
+ fused_node->add_input(shuffle_node.input(2)); + + // Set the `seed2` input argument. + fused_node->add_input(shuffle_node.input(3)); + + // Set the `count` input argument. + fused_node->add_input(repeat_node.input(1)); + + // Set the `seed_generator` input argument. + fused_node->add_input(shuffle_node.input(4)); + + // Set `output_types`, `output_shapes`, and `reshuffle_each_iteration` + // attributes. + for (auto key : {kOutputShapes, kOutputTypes, kReshuffleEachIteration}) { + graph_utils::CopyAttribute(key, shuffle_node, fused_node); + } + + return Status::OK(); +} } // namespace @@ -42,65 +158,46 @@ Status ShuffleAndRepeatFusion::OptimizeAndCollectStats( MutableGraphView graph(output); absl::flat_hash_set nodes_to_delete; - auto make_shuffle_and_repeat_node = [&output](const NodeDef& shuffle_node, - const NodeDef& repeat_node) { - NodeDef new_node; - new_node.set_op(kFusedOpName); - graph_utils::SetUniqueGraphNodeName(kFusedOpName, output, &new_node); - - // Set the `input` input argument. - new_node.add_input(shuffle_node.input(0)); - - // Set the `buffer_size` input argument. - new_node.add_input(shuffle_node.input(1)); - - // Set the `seed` input argument. - new_node.add_input(shuffle_node.input(2)); - - // Set the `seed2` input argument. - new_node.add_input(shuffle_node.input(3)); - - // Set the `count` input argument. - new_node.add_input(repeat_node.input(1)); - - // Set `output_types` and `output_shapes` attributes. - for (auto key : {"output_shapes", "output_types"}) { - graph_utils::CopyAttribute(key, repeat_node, &new_node); - } - return new_node; - }; - - for (const NodeDef& node : item.graph.node()) { - if (node.op() != "RepeatDataset") { + for (const NodeDef& repeat_node : item.graph.node()) { + if (repeat_node.op() != kRepeatDataset) { continue; } - // Use a more descriptive variable name now that we know the node type. 
- const NodeDef& repeat_node = node; - NodeDef* node2 = graph_utils::GetInputNode(repeat_node, graph); + const NodeDef& shuffle_node = + *graph_utils::GetInputNode(repeat_node, graph); - if (node2->op() != "ShuffleDataset") { + NodeDef fused_node; + if (shuffle_node.op() == kShuffleDataset) { + TF_RETURN_IF_ERROR(FuseShuffleV1AndRepeat(shuffle_node, repeat_node, + &graph, output, &fused_node)); + } else if (shuffle_node.op() == kShuffleDatasetV2) { + TF_RETURN_IF_ERROR(FuseShuffleV2AndRepeat(shuffle_node, repeat_node, + &graph, output, &fused_node)); + + } else if (shuffle_node.op() == kShuffleDatasetV3) { + TF_RETURN_IF_ERROR(FuseShuffleV3AndRepeat(shuffle_node, repeat_node, + &graph, output, &fused_node)); + } else { continue; } - // Use a more descriptive variable name now that we know the node type. - const NodeDef& shuffle_node = *node2; - - // TODO(b/129712758): Remove when the fused kernel supports disabling - // reshuffling for each iteration. - if (HasNodeAttr(shuffle_node, "reshuffle_each_iteration") && - !shuffle_node.attr().at("reshuffle_each_iteration").b()) { - continue; - } - - NodeDef* shuffle_and_repeat_node = - graph.AddNode(make_shuffle_and_repeat_node(shuffle_node, repeat_node)); + NodeDef& shuffle_and_repeat_node = *graph.AddNode(std::move(fused_node)); TF_RETURN_IF_ERROR(graph.UpdateFanouts(repeat_node.name(), - shuffle_and_repeat_node->name())); + shuffle_and_repeat_node.name())); + // Update shuffle node fanouts to shuffle_and_repeat fanouts to take care of + // control dependencies. + TF_RETURN_IF_ERROR(graph.UpdateFanouts(shuffle_node.name(), + shuffle_and_repeat_node.name())); - // Mark the `Shuffle` and `Repeat` nodes for removal. - nodes_to_delete.insert(shuffle_node.name()); - nodes_to_delete.insert(repeat_node.name()); + // Mark the `Shuffle` and `Repeat` nodes for removal (as long as neither of + // them needs to be preserved). 
+ const auto nodes_to_preserve = item.NodesToPreserve(); + if (nodes_to_preserve.find(shuffle_node.name()) == + nodes_to_preserve.end() && + nodes_to_preserve.find(repeat_node.name()) == nodes_to_preserve.end()) { + nodes_to_delete.insert(shuffle_node.name()); + nodes_to_delete.insert(repeat_node.name()); + } stats->num_changes++; } diff --git a/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion_test.cc b/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion_test.cc index 556e1d3ab57..9a5c454ad0c 100644 --- a/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion_test.cc +++ b/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion_test.cc @@ -25,17 +25,21 @@ namespace tensorflow { namespace grappler { namespace { -TEST(ShuffleAndRepeatFusionTest, FuseShuffleAndRepeatNodesIntoOne) { +constexpr char kOutputShapes[] = "output_shapes"; +constexpr char kOutputTypes[] = "output_types"; +constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration"; + +TEST(ShuffleAndRepeatFusionTest, FuseShuffleV1AndRepeat) { GrapplerItem item; MutableGraphView graph(&item.graph); std::vector> common_attrs(2); AttrValue shapes_attr; - SetAttrValue("output_shapes", &shapes_attr); - common_attrs[0] = std::make_pair("output_shapes", shapes_attr); + SetAttrValue(kOutputShapes, &shapes_attr); + common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr); AttrValue types_attr; - SetAttrValue("output_types", &types_attr); - common_attrs[1] = std::make_pair("output_types", types_attr); + SetAttrValue(kOutputTypes, &types_attr); + common_attrs[1] = std::make_pair(kOutputTypes, types_attr); NodeDef *start_node = graph_utils::AddScalarConstNode(0, &graph); NodeDef *stop_node = graph_utils::AddScalarConstNode(10, &graph); @@ -59,6 +63,7 @@ TEST(ShuffleAndRepeatFusionTest, FuseShuffleAndRepeatNodesIntoOne) { shuffle_inputs[3] = seed2_node->name(); NodeDef *shuffle_node = graph_utils::AddNode( "", "ShuffleDataset", shuffle_inputs, 
common_attrs, &graph); + (*shuffle_node->mutable_attr())[kReshuffleEachIteration].set_b(true); NodeDef *count_node = graph_utils::AddScalarConstNode(-1, &graph); std::vector repeat_inputs(2); @@ -85,12 +90,148 @@ TEST(ShuffleAndRepeatFusionTest, FuseShuffleAndRepeatNodesIntoOne) { EXPECT_EQ(shuffle_and_repeat_node.input(2), shuffle_node->input(2)); EXPECT_EQ(shuffle_and_repeat_node.input(3), shuffle_node->input(3)); EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1)); + for (const auto &attr : + {kOutputShapes, kOutputTypes, kReshuffleEachIteration}) { + EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr), + shuffle_node->attr().at(attr))); + } +} + +TEST(ShuffleAndRepeatFusionTest, FuseShuffleV2AndRepeat) { + GrapplerItem item; + MutableGraphView graph(&item.graph); + + std::vector> common_attrs(2); + AttrValue shapes_attr; + SetAttrValue(kOutputShapes, &shapes_attr); + common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr); + AttrValue types_attr; + SetAttrValue(kOutputTypes, &types_attr); + common_attrs[1] = std::make_pair(kOutputTypes, types_attr); + + NodeDef *start_node = graph_utils::AddScalarConstNode(0, &graph); + NodeDef *stop_node = graph_utils::AddScalarConstNode(10, &graph); + NodeDef *step_node = graph_utils::AddScalarConstNode(1, &graph); + + std::vector range_inputs(3); + range_inputs[0] = start_node->name(); + range_inputs[1] = stop_node->name(); + range_inputs[2] = step_node->name(); + NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs, + common_attrs, &graph); + + NodeDef *buffer_size_node = + graph_utils::AddScalarConstNode(128, &graph); + NodeDef *seed_generator_node = + graph_utils::AddScalarConstNode("dummy_resource", &graph); + std::vector shuffle_inputs(3); + shuffle_inputs[0] = range_node->name(); + shuffle_inputs[1] = buffer_size_node->name(); + shuffle_inputs[2] = seed_generator_node->name(); + NodeDef *shuffle_node = graph_utils::AddNode( + "", "ShuffleDatasetV2", 
shuffle_inputs, common_attrs, &graph); + + NodeDef *count_node = graph_utils::AddScalarConstNode(-1, &graph); + std::vector repeat_inputs(2); + repeat_inputs[0] = shuffle_node->name(); + repeat_inputs[1] = count_node->name(); + NodeDef *repeat_node = graph_utils::AddNode( + "", "RepeatDataset", repeat_inputs, common_attrs, &graph); + + ShuffleAndRepeatFusion optimizer; + GraphDef output; + TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); + + EXPECT_FALSE( + graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output)); + EXPECT_FALSE( + graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output)); EXPECT_TRUE( - AreAttrValuesEqual(shuffle_and_repeat_node.attr().at("output_shapes"), - repeat_node->attr().at("output_shapes"))); + graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDatasetV2", output)); + NodeDef shuffle_and_repeat_node = output.node( + graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDatasetV2", output)); + EXPECT_EQ(shuffle_and_repeat_node.input_size(), 6); + EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0)); + EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1)); + EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1)); + EXPECT_EQ(shuffle_and_repeat_node.input(5), shuffle_node->input(2)); + for (const auto &attr : {kOutputShapes, kOutputTypes}) { + EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr), + shuffle_node->attr().at(attr))); + } + EXPECT_TRUE(shuffle_and_repeat_node.attr().at(kReshuffleEachIteration).b()); +} + +TEST(ShuffleAndRepeatFusionTest, FuseShuffleV3AndRepeat) { + GrapplerItem item; + MutableGraphView graph(&item.graph); + + std::vector> common_attrs(2); + AttrValue shapes_attr; + SetAttrValue(kOutputShapes, &shapes_attr); + common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr); + AttrValue types_attr; + SetAttrValue(kOutputTypes, &types_attr); + common_attrs[1] = std::make_pair(kOutputTypes, types_attr); + + NodeDef *start_node = 
graph_utils::AddScalarConstNode(0, &graph); + NodeDef *stop_node = graph_utils::AddScalarConstNode(10, &graph); + NodeDef *step_node = graph_utils::AddScalarConstNode(1, &graph); + + std::vector range_inputs(3); + range_inputs[0] = start_node->name(); + range_inputs[1] = stop_node->name(); + range_inputs[2] = step_node->name(); + NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs, + common_attrs, &graph); + + NodeDef *buffer_size_node = + graph_utils::AddScalarConstNode(128, &graph); + NodeDef *seed_node = graph_utils::AddScalarConstNode(-1, &graph); + NodeDef *seed2_node = graph_utils::AddScalarConstNode(-1, &graph); + NodeDef *seed_generator_node = + graph_utils::AddScalarConstNode("dummy_resource", &graph); + std::vector shuffle_inputs(5); + shuffle_inputs[0] = range_node->name(); + shuffle_inputs[1] = buffer_size_node->name(); + shuffle_inputs[2] = seed_node->name(); + shuffle_inputs[3] = seed2_node->name(); + shuffle_inputs[4] = seed_generator_node->name(); + NodeDef *shuffle_node = graph_utils::AddNode( + "", "ShuffleDatasetV3", shuffle_inputs, common_attrs, &graph); + (*shuffle_node->mutable_attr())[kReshuffleEachIteration].set_b(true); + + NodeDef *count_node = graph_utils::AddScalarConstNode(-1, &graph); + std::vector repeat_inputs(2); + repeat_inputs[0] = shuffle_node->name(); + repeat_inputs[1] = count_node->name(); + NodeDef *repeat_node = graph_utils::AddNode( + "", "RepeatDataset", repeat_inputs, common_attrs, &graph); + + ShuffleAndRepeatFusion optimizer; + GraphDef output; + TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); + + EXPECT_FALSE( + graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output)); + EXPECT_FALSE( + graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output)); EXPECT_TRUE( - AreAttrValuesEqual(shuffle_and_repeat_node.attr().at("output_types"), - repeat_node->attr().at("output_types"))); + graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDatasetV2", output)); + NodeDef 
shuffle_and_repeat_node = output.node( + graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDatasetV2", output)); + EXPECT_EQ(shuffle_and_repeat_node.input_size(), 6); + EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0)); + EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1)); + EXPECT_EQ(shuffle_and_repeat_node.input(2), shuffle_node->input(2)); + EXPECT_EQ(shuffle_and_repeat_node.input(3), shuffle_node->input(3)); + EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1)); + EXPECT_EQ(shuffle_and_repeat_node.input(5), shuffle_node->input(4)); + for (const auto &attr : + {kOutputShapes, kOutputTypes, kReshuffleEachIteration}) { + EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr), + shuffle_node->attr().at(attr))); + } } TEST(ShuffleAndRepeatFusionTest, NoChange) { @@ -99,11 +240,11 @@ TEST(ShuffleAndRepeatFusionTest, NoChange) { std::vector> common_attrs(2); AttrValue shapes_attr; - SetAttrValue("output_shapes", &shapes_attr); - common_attrs[0] = std::make_pair("output_shapes", shapes_attr); + SetAttrValue(kOutputShapes, &shapes_attr); + common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr); AttrValue types_attr; - SetAttrValue("output_types", &types_attr); - common_attrs[1] = std::make_pair("output_types", types_attr); + SetAttrValue(kOutputTypes, &types_attr); + common_attrs[1] = std::make_pair(kOutputTypes, types_attr); NodeDef *start_node = graph_utils::AddScalarConstNode(0, &graph); NodeDef *stop_node = graph_utils::AddScalarConstNode(10, &graph); diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD index c42961525f4..a2d850d4c1c 100644 --- a/tensorflow/core/kernels/BUILD +++ b/tensorflow/core/kernels/BUILD @@ -4540,6 +4540,9 @@ tf_cuda_cc_test( name = "split_v_op_test", size = "small", srcs = ["split_v_op_test.cc"], + tags = [ + "no_windows", # split_v_op uses lrand48 which does not exist on Windows + ], deps = [ ":ops_testutil", ":ops_util", diff --git 
a/tensorflow/core/kernels/data/captured_function.cc b/tensorflow/core/kernels/data/captured_function.cc index d0906a5d749..b4244c95395 100644 --- a/tensorflow/core/kernels/data/captured_function.cc +++ b/tensorflow/core/kernels/data/captured_function.cc @@ -671,20 +671,13 @@ Status InstantiatedCapturedFunction::Run(IteratorContext* ctx, OwnedArgsCallFrame frame(std::move(args), &captured_func_->captured_inputs(), ret_types_); - Notification n; - Status s; profiler::TraceMe activity( [&] { return absl::StrCat( "InstantiatedCapturedFunction::Run#id=", f_opts.step_id, "#"); }, profiler::TraceMeLevel::kInfo); - lib_->Run(f_opts, f_handle_, &frame, [&n, &s](const Status& func_status) { - s.Update(func_status); - n.Notify(); - }); - n.WaitForNotification(); - TF_RETURN_IF_ERROR(s); + TF_RETURN_IF_ERROR(lib_->RunSync(std::move(f_opts), f_handle_, &frame)); return frame.ConsumeRetvals(rets); } @@ -709,9 +702,6 @@ Status InstantiatedCapturedFunction::RunWithBorrowedArgs( BorrowedArgsCallFrame frame(args, &captured_func_->captured_inputs(), ret_types_); - Notification n; - Status s; - profiler::TraceMe activity( [&] { return absl::StrCat( @@ -719,12 +709,7 @@ Status InstantiatedCapturedFunction::RunWithBorrowedArgs( f_opts.step_id, "#"); }, profiler::TraceMeLevel::kInfo); - lib_->Run(f_opts, f_handle_, &frame, [&n, &s](const Status& func_status) { - s.Update(func_status); - n.Notify(); - }); - n.WaitForNotification(); - TF_RETURN_IF_ERROR(s); + TF_RETURN_IF_ERROR(lib_->RunSync(std::move(f_opts), f_handle_, &frame)); return frame.ConsumeRetvals(rets); } @@ -748,21 +733,13 @@ Status InstantiatedCapturedFunction::RunInstantiated( BorrowedArgsCallFrame frame(args, &captured_func_->captured_inputs(), ret_types_); - Notification n; - Status s; - profiler::TraceMe activity( [&] { return absl::StrCat("InstantiatedCapturedFunction::RunInstantiated#id=", f_opts.step_id, "#"); }, profiler::TraceMeLevel::kInfo); - lib_->Run(f_opts, f_handle_, &frame, [&n, &s](const Status& func_status) 
{ - s.Update(func_status); - n.Notify(); - }); - n.WaitForNotification(); - TF_RETURN_IF_ERROR(s); + TF_RETURN_IF_ERROR(lib_->RunSync(std::move(f_opts), f_handle_, &frame)); return frame.ConsumeRetvals(rets); } diff --git a/tensorflow/core/kernels/data/experimental/BUILD b/tensorflow/core/kernels/data/experimental/BUILD index d6c9f3c077d..d61c574cb35 100644 --- a/tensorflow/core/kernels/data/experimental/BUILD +++ b/tensorflow/core/kernels/data/experimental/BUILD @@ -1,7 +1,6 @@ # Description: # Contains experimental kernels for datasets and iterators. -load("//tensorflow:tensorflow.bzl", "tf_grpc_cc_dependency") load( "//tensorflow:tensorflow.bzl", "tf_cc_test", @@ -132,12 +131,9 @@ tf_kernel_library( "//tensorflow/core:lib", "//tensorflow/core:lib_internal", "//tensorflow/core:protos_all_cc", + "//tensorflow/core/data/service:common_proto_cc", "//tensorflow/core/data/service:compression_utils", - "//tensorflow/core/data/service:credentials_factory", - "//tensorflow/core/data/service:grpc_util", - "//tensorflow/core/data/service:master_cc_grpc_proto", - "//tensorflow/core/data/service:master_proto_cc", - "//tensorflow/core/data/service:worker_cc_grpc_proto", + "//tensorflow/core/data/service:data_service", "//tensorflow/core/distributed_runtime/rpc:grpc_util", "//tensorflow/core/kernels/data:dataset_utils", "//tensorflow/core/kernels/data:name_utils", @@ -145,7 +141,6 @@ tf_kernel_library( "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/strings", - tf_grpc_cc_dependency(), ], ) @@ -158,13 +153,9 @@ tf_kernel_library( "//tensorflow/core:framework", "//tensorflow/core:lib", "//tensorflow/core:lib_internal", - "//tensorflow/core/data/service:credentials_factory", - "//tensorflow/core/data/service:grpc_util", - "//tensorflow/core/data/service:master_cc_grpc_proto", - "//tensorflow/core/data/service:master_proto_cc", + "//tensorflow/core/data/service:data_service", 
"//tensorflow/core/kernels/data:dataset_utils", "//tensorflow/core/kernels/data:iterator_ops", - tf_grpc_cc_dependency(), ], ) diff --git a/tensorflow/core/kernels/data/experimental/data_service_dataset_op.cc b/tensorflow/core/kernels/data/experimental/data_service_dataset_op.cc index d32383abad7..815468d98a3 100644 --- a/tensorflow/core/kernels/data/experimental/data_service_dataset_op.cc +++ b/tensorflow/core/kernels/data/experimental/data_service_dataset_op.cc @@ -18,18 +18,12 @@ limitations under the License. #include #include -#include "grpcpp/create_channel.h" -#include "grpcpp/impl/codegen/server_context.h" -#include "grpcpp/security/credentials.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" +#include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/data/service/compression_utils.h" -#include "tensorflow/core/data/service/credentials_factory.h" -#include "tensorflow/core/data/service/grpc_util.h" -#include "tensorflow/core/data/service/master.grpc.pb.h" -#include "tensorflow/core/data/service/master.pb.h" -#include "tensorflow/core/data/service/worker.grpc.pb.h" +#include "tensorflow/core/data/service/data_service.h" #include "tensorflow/core/distributed_runtime/rpc/grpc_util.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/model.h" @@ -155,8 +149,6 @@ class DataServiceDatasetOp::Dataset : public DatasetBase { "over the dataset via `create_iterator(dataset, job_token).`"); } job_id_ = ctx->job_token().job_id(); - TF_RETURN_IF_ERROR(CredentialsFactory::CreateClientCredentials( - dataset()->protocol_, &credentials_)); return Status::OK(); } @@ -212,7 +204,7 @@ class DataServiceDatasetOp::Dataset : public DatasetBase { int64 task_id; // Cached address of the worker for task `task_id`. 
std::string address; - std::unique_ptr worker_stub; + std::unique_ptr worker; std::unique_ptr thread; bool end_of_sequence = false; // Indicates that the thread has finished running. @@ -225,9 +217,7 @@ class DataServiceDatasetOp::Dataset : public DatasetBase { // the list of tasks changes. void TaskThreadManager(std::unique_ptr ctx) { VLOG(3) << "Starting task handler manager"; - auto channel = ::grpc::CreateChannel(dataset()->address_, credentials_); - std::unique_ptr master_stub = - MasterService::NewStub(channel); + DataServiceMasterClient master(dataset()->address_, dataset()->protocol_); uint64 next_check = Env::Default()->NowMicros(); while (true) { @@ -244,29 +234,27 @@ class DataServiceDatasetOp::Dataset : public DatasetBase { return; } } - UpdateTaskThreads(master_stub.get(), ctx.get()); + UpdateTaskThreads(&master, ctx.get()); next_check = Env::Default()->NowMicros() + dataset()->task_refresh_interval_ms_ * 1000; } } - void UpdateTaskThreads(MasterService::Stub* master_stub, + void UpdateTaskThreads(DataServiceMasterClient* master, IteratorContext* ctx) LOCKS_EXCLUDED(mu_) { VLOG(3) << "Updating task handler threads"; - GetTasksResponse resp; - GetTasksRequest req; - req.set_job_id(job_id_); - grpc::ClientContext client_ctx; - grpc::Status s = master_stub->GetTasks(&client_ctx, req, &resp); + std::vector tasks; + bool job_finished; + Status s = master->GetTasks(job_id_, &tasks, &job_finished); if (!s.ok()) { - LOG(INFO) << "Failed to get task info for job id " << job_id_ << ": " - << s.error_message() << "(" << s.error_code() << ")"; + LOG(WARNING) << "Failed to get task info for job id " << job_id_ << ": " + << s; return; } absl::flat_hash_set task_ids; mutex_lock l(mu_); - job_finished_ = resp.job_finished(); - for (auto& task : resp.task_info()) { + job_finished_ = job_finished; + for (auto& task : tasks) { task_ids.insert(task.id()); if (task_threads_.contains(task.id())) { continue; @@ -315,11 +303,12 @@ class DataServiceDatasetOp::Dataset : public 
DatasetBase { << task_handler->task_id << " with worker address " << task_handler->address; while (true) { - if (!task_handler->worker_stub) { - Status s = CreateWorkerStub(task_handler->address, - &task_handler->worker_stub); + if (!task_handler->worker) { + Status s = CreateDataServiceWorkerClient(task_handler->address, + dataset()->protocol_, + &task_handler->worker); if (!s.ok()) { - LOG(WARNING) << "Failed to create a worker stub for " + LOG(WARNING) << "Failed to create a worker client for " << task_handler->address << ": " << s; } } @@ -359,9 +348,11 @@ class DataServiceDatasetOp::Dataset : public DatasetBase { // `results_`. Status FetchElement(TaskThread* task_handler, int64 deadline_micros) { VLOG(3) << "Fetching an element for task id " << task_handler->task_id; - GetElementResponse resp; + CompressedElement compressed; + bool end_of_sequence; for (int num_retries = 0;; ++num_retries) { - Status s = RequestElement(task_handler, &resp); + Status s = task_handler->worker->GetElement( + task_handler->task_id, &compressed, &end_of_sequence); if (s.ok()) { break; } @@ -395,12 +386,11 @@ class DataServiceDatasetOp::Dataset : public DatasetBase { } std::vector element; - if (!resp.end_of_sequence()) { - TF_RETURN_IF_ERROR( - service_util::Uncompress(resp.compressed_element(), &element)); + if (!end_of_sequence) { + TF_RETURN_IF_ERROR(service_util::Uncompress(compressed, &element)); } mutex_lock l(mu_); - if (resp.end_of_sequence()) { + if (end_of_sequence) { task_handler->end_of_sequence = true; return Status::OK(); } @@ -410,31 +400,6 @@ class DataServiceDatasetOp::Dataset : public DatasetBase { return Status::OK(); } - Status RequestElement(TaskThread* task_handler, GetElementResponse* resp) { - GetElementRequest req; - req.set_task_id(task_handler->task_id); - grpc::ClientContext client_ctx; - grpc::Status s = - task_handler->worker_stub->GetElement(&client_ctx, req, resp); - if (s.ok()) { - return Status::OK(); - } - return grpc_util::WrapError("Failed to 
request an element", s); - } - - Status CreateWorkerStub(const std::string& worker_address, - std::unique_ptr* stub) { - ::grpc::ChannelArguments args; - args.SetMaxReceiveMessageSize(-1); - std::shared_ptr<::grpc::ChannelCredentials> credentials; - TF_RETURN_IF_ERROR(CredentialsFactory::CreateClientCredentials( - dataset()->protocol_, &credentials)); - auto channel = - ::grpc::CreateCustomChannel(worker_address, credentials, args); - *stub = WorkerService::NewStub(channel); - return Status::OK(); - } - mutex mu_; // TODO(aaudibert): split this into a couple cvs for different conditions // so that we can use notify_one and avoid unnecessary wakeups. @@ -450,7 +415,6 @@ class DataServiceDatasetOp::Dataset : public DatasetBase { // Set once in Initialize(). int64 job_id_; - std::shared_ptr<::grpc::ChannelCredentials> credentials_; int64 num_unfinished_tasks_ TF_GUARDED_BY(mu_) = 0; bool job_finished_ = false; diff --git a/tensorflow/core/kernels/data/experimental/data_service_ops.cc b/tensorflow/core/kernels/data/experimental/data_service_ops.cc index 51be4461c4f..fa3a1a51c1e 100644 --- a/tensorflow/core/kernels/data/experimental/data_service_ops.cc +++ b/tensorflow/core/kernels/data/experimental/data_service_ops.cc @@ -15,11 +15,7 @@ limitations under the License. 
#include "tensorflow/core/kernels/data/experimental/data_service_ops.h" -#include "grpcpp/create_channel.h" -#include "grpcpp/security/credentials.h" -#include "tensorflow/core/data/service/credentials_factory.h" -#include "tensorflow/core/data/service/grpc_util.h" -#include "tensorflow/core/data/service/master.grpc.pb.h" +#include "tensorflow/core/data/service/data_service.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/kernels/data/dataset_utils.h" #include "tensorflow/core/platform/errors.h" @@ -69,26 +65,14 @@ void RegisterDatasetOp::Compute(OpKernelContext* ctx) { OP_REQUIRES_OK( ctx, AsGraphDef(ctx, dataset, std::move(serialization_ctx), &graph_def)); - VLOG(3) << "Registering dataset with master at " << address - << ". Protocol=" << protocol; - std::shared_ptr<::grpc::ChannelCredentials> credentials; - OP_REQUIRES_OK( - ctx, CredentialsFactory::CreateClientCredentials(protocol, &credentials)); - auto channel = ::grpc::CreateChannel(address, credentials); - auto master_stub = MasterService::NewStub(channel); - GetOrRegisterDatasetRequest req; - *req.mutable_dataset()->mutable_graph() = graph_def; - GetOrRegisterDatasetResponse resp; - grpc::ClientContext client_ctx; - auto status = master_stub->GetOrRegisterDataset(&client_ctx, req, &resp); - if (!status.ok()) { - ctx->CtxFailure(grpc_util::WrapError("Failed to register dataset", status)); - return; - } + DataServiceMasterClient client(address, protocol); + int64 dataset_id; + OP_REQUIRES_OK(ctx, client.RegisterDataset(graph_def, &dataset_id)); + Tensor* output; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape{}, &output)); auto output_dataset_id = output->tensor(); - output_dataset_id() = resp.dataset_id(); + output_dataset_id() = dataset_id; } CreateJobOp::CreateJobOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} @@ -114,24 +98,11 @@ void CreateJobOp::Compute(OpKernelContext* ctx) { OP_REQUIRES_OK(ctx, ParseProcessingMode(processing_mode_str, &processing_mode)); - 
std::shared_ptr<::grpc::ChannelCredentials> credentials; - OP_REQUIRES_OK( - ctx, CredentialsFactory::CreateClientCredentials(protocol, &credentials)); - auto channel = ::grpc::CreateChannel(address, credentials); - auto master_stub = MasterService::NewStub(channel); - CreateJobRequest req; - req.set_dataset_id(dataset_id); - req.set_processing_mode(ProcessingModeDef(processing_mode)); - CreateJobResponse resp; - grpc::ClientContext client_ctx; - auto status = master_stub->CreateJob(&client_ctx, req, &resp); - if (!status.ok()) { - ctx->CtxFailure(grpc_util::WrapError( - absl::StrCat("Failed to begin epoch for dataset id ", dataset_id), - status)); - return; - } - JobToken token(resp.job_id()); + DataServiceMasterClient client(address, protocol); + int64 job_id; + OP_REQUIRES_OK(ctx, client.CreateJob(dataset_id, processing_mode, &job_id)); + + JobToken token(job_id); Tensor* output; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape{}, &output)); auto output_token = output->tensor(); diff --git a/tensorflow/core/kernels/data/experimental/data_service_ops.h b/tensorflow/core/kernels/data/experimental/data_service_ops.h index b7f72a891fb..ebbcb39d0a3 100644 --- a/tensorflow/core/kernels/data/experimental/data_service_ops.h +++ b/tensorflow/core/kernels/data/experimental/data_service_ops.h @@ -44,13 +44,6 @@ class RegisterDatasetOp : public OpKernel { SerializationContext::ExternalStatePolicy external_state_policy_; }; -enum class ProcessingMode : int64 { - // Each tf.data worker processes an entire epoch. - PARALLEL_EPOCHS = 0, - // Processing of an epoch is distributed across all tf.data workers. - ONE_EPOCH = 1, -}; - // Creates a token for reading from the tf.data service. // // The dataset_id input identifies which dataset to create a token for. 
diff --git a/tensorflow/core/kernels/data/iterator_ops.cc b/tensorflow/core/kernels/data/iterator_ops.cc index 0cd5ab5390e..21fa5bf6ac2 100644 --- a/tensorflow/core/kernels/data/iterator_ops.cc +++ b/tensorflow/core/kernels/data/iterator_ops.cc @@ -841,16 +841,9 @@ class OneShotIteratorOp : public AsyncOpKernel { opts.step_container = &step_container; opts.runner = ctx->runner(); opts.run_all_kernels_inline = ctx->run_all_kernels_inline(); - Notification n; - Status factory_status; std::vector return_values; - ctx->function_library()->Run(opts, f_handle, {}, &return_values, - [&n, &factory_status](Status s) { - factory_status.Update(s); - n.Notify(); - }); - n.WaitForNotification(); - TF_RETURN_IF_ERROR(factory_status); + TF_RETURN_IF_ERROR(ctx->function_library()->RunSync( + std::move(opts), f_handle, {}, &return_values)); if (return_values.size() != 1 || return_values[0].dtype() != DT_VARIANT || !TensorShapeUtils::IsScalar(return_values[0].shape())) { return errors::InvalidArgument( diff --git a/tensorflow/core/kernels/data/shuffle_dataset_op.cc b/tensorflow/core/kernels/data/shuffle_dataset_op.cc index 852ba23e774..3e549246a95 100644 --- a/tensorflow/core/kernels/data/shuffle_dataset_op.cc +++ b/tensorflow/core/kernels/data/shuffle_dataset_op.cc @@ -44,10 +44,10 @@ namespace data { /* static */ constexpr const char* const ShuffleDatasetOpBase::kSeed2; /* static */ constexpr const char* const ShuffleDatasetOpBase::kOutputTypes; /* static */ constexpr const char* const ShuffleDatasetOpBase::kOutputShapes; +/* static */ constexpr const char* const + ShuffleDatasetOpBase::kReshuffleEachIteration; /* static */ constexpr const char* const ShuffleDatasetOp::kDatasetType; -/* static */ constexpr const char* const - ShuffleDatasetOp::kReshuffleEachIteration; /* static */ constexpr const char* const ShuffleAndRepeatDatasetOp::kDatasetType; @@ -72,6 +72,8 @@ constexpr char kEpochNumRandomSamples[] = "epoch_num_random_samples"; constexpr char kShuffleDatasetV1[] = 
"ShuffleDataset"; constexpr char kShuffleDatasetV2[] = "ShuffleDatasetV2"; constexpr char kShuffleDatasetV3[] = "ShuffleDatasetV3"; +constexpr char kShuffleAndRepeatDatasetV1[] = "ShuffleAndRepeatDatasetV1"; +constexpr char kShuffleAndRepeatDatasetV2[] = "ShuffleAndRepeatDatasetV2"; ShuffleDatasetOpBase::ShuffleDatasetOpBase(OpKernelConstruction* ctx) : UnaryDatasetOpKernel(ctx) {} @@ -225,6 +227,10 @@ class ShuffleDatasetOpBase::ShuffleDatasetBase : public DatasetBase { while (!slices_.empty() && slices_.front()->start == slices_.front()->end) { slices_.pop_front(); + // Reinitialize the RNG state for the next epoch. + num_random_samples_ = 0; + seed_generator_->GenerateSeeds(&seed_, &seed2_); + ResetRngs(); } DCHECK(!slices_.empty()); // Choose an element to produce uniformly at random from the first @@ -663,6 +669,7 @@ void ShuffleDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, RandomSeeds seeds(seed, seed2); bool owns_resource = false; if (errors::IsNotFound(s)) { + owns_resource = true; OP_REQUIRES_OK( ctx, ctx->resource_manager()->LookupOrCreate( @@ -679,7 +686,6 @@ void ShuffleDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, return Status::OK(); })); handle = MakeResourceHandle(ctx, container, name); - owns_resource = true; } else { OP_REQUIRES_OK(ctx, s); } @@ -695,6 +701,7 @@ void ShuffleDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, handle.container(), handle.name(), &manager); bool owns_resource = false; if (errors::IsNotFound(s)) { + owns_resource = true; LOG(WARNING) << "Failed to find seed generator resource. 
Falling back to " "using a non-deterministically seeded generator and " "reshuffling each iteration."; @@ -708,7 +715,6 @@ void ShuffleDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, return Status::OK(); })); handle = MakeResourceHandle(ctx, container, name); - owns_resource = true; } else { OP_REQUIRES_OK(ctx, s); } @@ -790,9 +796,13 @@ class ShuffleAndRepeatDatasetOp::Dataset : public ShuffleDatasetBase { TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed)); TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2)); TF_RETURN_IF_ERROR(b->AddScalar(count_, &count)); + AttrValue reshuffle_each_iteration; + b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(), + &reshuffle_each_iteration); TF_RETURN_IF_ERROR(b->AddDataset( this, {input_graph_node, buffer_size, seed, seed2, count}, // Inputs - {}, // Attrs + {std::make_pair(kReshuffleEachIteration, + reshuffle_each_iteration)}, // Attrs output)); return Status::OK(); } @@ -804,8 +814,83 @@ class ShuffleAndRepeatDatasetOp::Dataset : public ShuffleDatasetBase { const RandomSeeds seeds_; }; +class ShuffleAndRepeatDatasetOp::DatasetV2 : public ShuffleDatasetBase { + public: + DatasetV2(OpKernelContext* ctx, const DatasetBase* input, int64 buffer_size, + int64 count, RandomSeeds&& seeds, SeedGeneratorManager* manager, + ResourceHandle&& resource_handle, bool owns_resource) + : ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count), + manager_(manager), + owns_resource_(owns_resource), + resource_handle_(std::move(resource_handle)), + resource_mgr_(ctx->resource_manager()), + seeds_(std::move(seeds)) {} + + ~DatasetV2() override { + manager_->Unref(); + if (owns_resource_) { + Status s = resource_mgr_->Delete( + resource_handle_.container(), resource_handle_.name()); + if (!s.ok()) { + LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString(); + } + } + } + + string op_type() const override { return kDatasetType; } + + protected: + Status 
AsGraphDefInternal(SerializationContext* ctx, + DatasetGraphDefBuilder* b, + Node** output) const override { + Node* input_graph_node = nullptr; + TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node)); + Node* buffer_size_node = nullptr; + Node* seed_node = nullptr; + Node* seed2_node = nullptr; + Node* count_node = nullptr; + TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node)); + TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node)); + TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node)); + TF_RETURN_IF_ERROR(b->AddScalar(count_, &count_node)); + Node* resource_handle_node = nullptr; + Tensor handle(DT_RESOURCE, TensorShape({})); + handle.scalar()() = resource_handle_; + TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node)); + AttrValue reshuffle_each_iteration; + b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(), + &reshuffle_each_iteration); + TF_RETURN_IF_ERROR( + b->AddDataset(this, + {input_graph_node, buffer_size_node, seed_node, + seed2_node, count_node, resource_handle_node}, // Inputs + {std::make_pair(kReshuffleEachIteration, + reshuffle_each_iteration)}, // Attrs + output)); + return Status::OK(); + } + + private: + SeedGeneratorManager* const manager_; // Owned + const bool owns_resource_; + const ResourceHandle resource_handle_; + ResourceMgr* const resource_mgr_; // Not owned. 
+ const RandomSeeds seeds_; +}; + ShuffleAndRepeatDatasetOp::ShuffleAndRepeatDatasetOp(OpKernelConstruction* ctx) - : ShuffleDatasetOpBase(ctx) {} + : ShuffleDatasetOpBase(ctx) { + auto& op_name = ctx->def().op(); + if (op_name == kShuffleAndRepeatDatasetV2) { + op_version_ = 2; + } else if (op_name == kShuffleAndRepeatDatasetV1) { + op_version_ = 1; + } + if (ctx->HasAttr(kReshuffleEachIteration)) { + OP_REQUIRES_OK( + ctx, ctx->GetAttr(kReshuffleEachIteration, &reshuffle_each_iteration_)); + } +} void ShuffleAndRepeatDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, @@ -826,29 +911,76 @@ void ShuffleAndRepeatDatasetOp::MakeDataset(OpKernelContext* ctx, int64 count; OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kCount, &count)); - RandomSeeds seeds(seed, seed2); - OP_REQUIRES(ctx, count > 0 || count == -1, errors::InvalidArgument( "count must be greater than zero or equal to -1.")); + RandomSeeds seeds(seed, seed2); + static std::atomic resource_id_counter(0); const string& container = ctx->resource_manager()->default_container(); auto name = strings::StrCat(ctx->op_kernel().name(), "/", kSeedGenerator, "_", resource_id_counter.fetch_add(1)); - SeedGeneratorManager* manager; - OP_REQUIRES_OK( - ctx, - ctx->resource_manager()->LookupOrCreate( - container, name, &manager, [&seeds](SeedGeneratorManager** manager) { - *manager = new SeedGeneratorManager(new RandomSeedGenerator(seeds)); - return Status::OK(); - })); - auto handle = MakeResourceHandle(ctx, container, name); + if (op_version_ == 2) { + auto handle = HandleFromInput(ctx, 5); + SeedGeneratorManager* manager = nullptr; + Status s = ctx->resource_manager()->Lookup( + handle.container(), handle.name(), &manager); + bool owns_resource = false; + if (errors::IsNotFound(s)) { + owns_resource = true; + OP_REQUIRES_OK( + ctx, + ctx->resource_manager()->LookupOrCreate( + container, name, &manager, + [reshuffle = reshuffle_each_iteration_, + &seeds](SeedGeneratorManager** manager) { + if (reshuffle) { 
+ *manager = + new SeedGeneratorManager(new RandomSeedGenerator(seeds)); + } else { + *manager = + new SeedGeneratorManager(new FixedSeedGenerator(seeds)); + } + return Status::OK(); + })); + handle = MakeResourceHandle(ctx, container, name); + } else { + OP_REQUIRES_OK(ctx, s); + } - // Ownership of manager is transferred onto `Dataset`. - *output = new Dataset(ctx, input, buffer_size, std::move(seeds), manager, - count, std::move(handle)); + // Ownership of manager is transferred onto `DatasetV2`. + *output = new ShuffleAndRepeatDatasetOp::DatasetV2( + ctx, input, buffer_size, count, std::move(seeds), manager, + std::move(handle), owns_resource); + } else { + if (op_version_ != 1) { + LOG(WARNING) << "Unsupported version of shuffle dataset op: " + << op_version_ << ". Defaulting to version 1."; + } + SeedGeneratorManager* manager; + OP_REQUIRES_OK( + ctx, + ctx->resource_manager()->LookupOrCreate( + container, name, &manager, + [reshuffle = reshuffle_each_iteration_, + &seeds](SeedGeneratorManager** manager) { + if (reshuffle) { + *manager = + new SeedGeneratorManager(new RandomSeedGenerator(seeds)); + } else { + *manager = + new SeedGeneratorManager(new FixedSeedGenerator(seeds)); + } + return Status::OK(); + })); + auto handle = + MakeResourceHandle(ctx, container, name); + + // Ownership of manager is transferred onto `Dataset`. 
+ *output = new Dataset(ctx, input, buffer_size, std::move(seeds), manager, + count, std::move(handle)); + } } namespace { @@ -863,6 +995,9 @@ REGISTER_KERNEL_BUILDER(Name("ShuffleDatasetV3").Device(DEVICE_CPU), REGISTER_KERNEL_BUILDER(Name("ShuffleAndRepeatDataset").Device(DEVICE_CPU), ShuffleAndRepeatDatasetOp); + +REGISTER_KERNEL_BUILDER(Name("ShuffleAndRepeatDatasetV2").Device(DEVICE_CPU), + ShuffleAndRepeatDatasetOp); } // namespace } // namespace data } // namespace tensorflow diff --git a/tensorflow/core/kernels/data/shuffle_dataset_op.h b/tensorflow/core/kernels/data/shuffle_dataset_op.h index 7aa3c0e3ef0..f33f75c84eb 100644 --- a/tensorflow/core/kernels/data/shuffle_dataset_op.h +++ b/tensorflow/core/kernels/data/shuffle_dataset_op.h @@ -28,6 +28,8 @@ class ShuffleDatasetOpBase : public UnaryDatasetOpKernel { static constexpr const char* const kSeed2 = "seed2"; static constexpr const char* const kOutputTypes = "output_types"; static constexpr const char* const kOutputShapes = "output_shapes"; + static constexpr const char* const kReshuffleEachIteration = + "reshuffle_each_iteration"; explicit ShuffleDatasetOpBase(OpKernelConstruction* ctx); @@ -38,8 +40,6 @@ class ShuffleDatasetOpBase : public UnaryDatasetOpKernel { class ShuffleDatasetOp : public ShuffleDatasetOpBase { public: static constexpr const char* const kDatasetType = "Shuffle"; - static constexpr const char* const kReshuffleEachIteration = - "reshuffle_each_iteration"; explicit ShuffleDatasetOp(OpKernelConstruction* ctx); @@ -52,7 +52,7 @@ class ShuffleDatasetOp : public ShuffleDatasetOpBase { class DatasetV2; class DatasetV3; int op_version_ = 0; - bool reshuffle_each_iteration_; + bool reshuffle_each_iteration_ = true; }; class ShuffleAndRepeatDatasetOp : public ShuffleDatasetOpBase { @@ -68,6 +68,9 @@ class ShuffleAndRepeatDatasetOp : public ShuffleDatasetOpBase { private: class Dataset; + class DatasetV2; + int op_version_ = 0; + bool reshuffle_each_iteration_ = true; }; } // namespace data 
diff --git a/tensorflow/core/kernels/data/shuffle_dataset_op_test.cc b/tensorflow/core/kernels/data/shuffle_dataset_op_test.cc index 6d16d76ea61..65f6855b7fa 100644 --- a/tensorflow/core/kernels/data/shuffle_dataset_op_test.cc +++ b/tensorflow/core/kernels/data/shuffle_dataset_op_test.cc @@ -72,10 +72,8 @@ class ShuffleDatasetParams : public DatasetParams { output_dtypes_); attr_vector->emplace_back(ShuffleDatasetOpBase::kOutputShapes, output_shapes_); - if (count_ == 1) { - attr_vector->emplace_back(ShuffleDatasetOp::kReshuffleEachIteration, - reshuffle_each_iteration_); - } + attr_vector->emplace_back(ShuffleDatasetOp::kReshuffleEachIteration, + reshuffle_each_iteration_); return Status::OK(); } @@ -297,23 +295,23 @@ std::vector> GetNextTestCases() { {/*dataset_params=*/ShuffleDatasetParams7(), /*expected_shuffle_outputs=*/ CreateTensors(TensorShape({}), - {{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}, - {0}, {5}, {1}, {7}, {2}, {9}, {8}, {4}, {6}, {3}}), + {{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}, + {9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}}), /*expected_reshuffle_outputs=*/ - CreateTensors(TensorShape({}), {{1}, {6}, {0}, {5}, {2}, {7}, {4}, - {3}, {9}, {8}, {6}, {5}, {0}, {9}, - {4}, {7}, {2}, {8}, {1}, {3}})}, + CreateTensors(TensorShape({}), {{9}, {0}, {8}, {6}, {1}, {3}, {7}, + {2}, {4}, {5}, {9}, {0}, {8}, {6}, + {1}, {3}, {7}, {2}, {4}, {5}})}, {/*dataset_params=*/ShuffleDatasetParams8(), /*expected_shuffle_outputs=*/ CreateTensors( TensorShape({}), - {{1}, {2}, {0}, {1}, {2}, {0}, {1}, {0}, {2}, {1}, {0}, - {2}, {0}, {2}, {1}, {0}, {1}, {2}, {1}, {2}, {0}}), + {{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, + {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}}), /*expected_reshuffle_outputs=*/ CreateTensors( TensorShape({}), - {{1}, {0}, {2}, {0}, {1}, {2}, {2}, {1}, {0}, {0}, {1}, - {2}, {0}, {2}, {1}, {0}, {1}, {2}, {1}, {0}, {2}})}}; + {{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, + {1}, {2}, {0}, {1}, {2}, {0}, 
{1}, {2}, {0}, {1}})}}; } class ParameterizedGetNextTest : public ShuffleDatasetOpTest, @@ -496,16 +494,16 @@ IteratorSaveAndRestoreTestCases() { {/*dataset_params=*/ShuffleDatasetParams7(), /*breakpoints=*/{0, 5, 22}, /*expected_shuffle_outputs=*/ - CreateTensors(TensorShape({}), {{2}, {6}, {1}, {3}, {9}, {5}, {0}, - {8}, {7}, {4}, {0}, {5}, {1}, {7}, - {2}, {9}, {8}, {4}, {6}, {3}})}, + CreateTensors(TensorShape({}), {{9}, {0}, {8}, {6}, {1}, {3}, {7}, + {2}, {4}, {5}, {9}, {0}, {8}, {6}, + {1}, {3}, {7}, {2}, {4}, {5}})}, {/*dataset_params=*/ShuffleDatasetParams8(), /*breakpoints=*/{0, 5, 20}, /*expected_shuffle_outputs=*/ CreateTensors( TensorShape({}), - {{1}, {2}, {0}, {1}, {2}, {0}, {1}, {0}, {2}, {1}, {0}, - {2}, {0}, {2}, {1}, {0}, {1}, {2}, {1}, {2}, {0}})}}; + {{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, + {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}})}}; } class ParameterizedIteratorSaveAndRestoreTest diff --git a/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDataset.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDataset.pbtxt index 5af8dd5896a..21ffe33b6e6 100644 --- a/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDataset.pbtxt +++ b/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDataset.pbtxt @@ -37,3 +37,49 @@ op { minimum: 1 } } +op { + name: "ShuffleAndRepeatDataset" + input_arg { + name: "input_dataset" + type: DT_VARIANT + } + input_arg { + name: "buffer_size" + type: DT_INT64 + } + input_arg { + name: "seed" + type: DT_INT64 + } + input_arg { + name: "seed2" + type: DT_INT64 + } + input_arg { + name: "count" + type: DT_INT64 + } + output_arg { + name: "handle" + type: DT_VARIANT + } + attr { + name: "output_types" + type: "list(type)" + has_minimum: true + minimum: 1 + } + attr { + name: "output_shapes" + type: "list(shape)" + has_minimum: true + minimum: 1 + } + attr { + name: "reshuffle_each_iteration" + type: "bool" + default_value { + b: true + } + } +} diff --git 
a/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDatasetV2.pbtxt b/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDatasetV2.pbtxt new file mode 100644 index 00000000000..bac1de1c30c --- /dev/null +++ b/tensorflow/core/ops/compat/ops_history_v2/ShuffleAndRepeatDatasetV2.pbtxt @@ -0,0 +1,51 @@ +op { + name: "ShuffleAndRepeatDatasetV2" + input_arg { + name: "input_dataset" + type: DT_VARIANT + } + input_arg { + name: "buffer_size" + type: DT_INT64 + } + input_arg { + name: "seed" + type: DT_INT64 + } + input_arg { + name: "seed2" + type: DT_INT64 + } + input_arg { + name: "count" + type: DT_INT64 + } + input_arg { + name: "seed_generator" + type: DT_RESOURCE + } + output_arg { + name: "handle" + type: DT_VARIANT + } + attr { + name: "reshuffle_each_iteration" + type: "bool" + default_value { + b: true + } + } + attr { + name: "output_types" + type: "list(type)" + has_minimum: true + minimum: 1 + } + attr { + name: "output_shapes" + type: "list(shape)" + has_minimum: true + minimum: 1 + } + is_stateful: true +} diff --git a/tensorflow/core/ops/dataset_ops.cc b/tensorflow/core/ops/dataset_ops.cc index 6dc2280feae..ab2cf35fa08 100644 --- a/tensorflow/core/ops/dataset_ops.cc +++ b/tensorflow/core/ops/dataset_ops.cc @@ -507,6 +507,7 @@ REGISTER_OP("ShuffleAndRepeatDataset") .Output("handle: variant") .Attr("output_types: list(type) >= 1") .Attr("output_shapes: list(shape) >= 1") + .Attr("reshuffle_each_iteration: bool = true") .SetShapeFn([](shape_inference::InferenceContext* c) { shape_inference::ShapeHandle unused; // buffer_size, seed, seed2, and count should be scalars. 
@@ -517,6 +518,28 @@ REGISTER_OP("ShuffleAndRepeatDataset") return shape_inference::ScalarShape(c); }); +REGISTER_OP("ShuffleAndRepeatDatasetV2") + .Input("input_dataset: variant") + .Input("buffer_size: int64") + .Input("seed: int64") + .Input("seed2: int64") + .Input("count: int64") + .Input("seed_generator: resource") + .Output("handle: variant") + .Attr("reshuffle_each_iteration: bool = true") + .Attr("output_types: list(type) >= 1") + .Attr("output_shapes: list(shape) >= 1") + .SetShapeFn([](shape_inference::InferenceContext* c) { + shape_inference::ShapeHandle unused; + // buffer_size, seed, seed2, count, and seed_generator should be scalars. + TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused)); + TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &unused)); + return shape_inference::ScalarShape(c); + }); + REGISTER_OP("AnonymousMemoryCache") .Output("handle: resource") .Output("deleter: variant") diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt index 95cd4e23f3e..f1591b98ffa 100644 --- a/tensorflow/core/ops/ops.pbtxt +++ b/tensorflow/core/ops/ops.pbtxt @@ -42275,6 +42275,64 @@ op { has_minimum: true minimum: 1 } + attr { + name: "reshuffle_each_iteration" + type: "bool" + default_value { + b: true + } + } +} +op { + name: "ShuffleAndRepeatDatasetV2" + input_arg { + name: "input_dataset" + type: DT_VARIANT + } + input_arg { + name: "buffer_size" + type: DT_INT64 + } + input_arg { + name: "seed" + type: DT_INT64 + } + input_arg { + name: "seed2" + type: DT_INT64 + } + input_arg { + name: "count" + type: DT_INT64 + } + input_arg { + name: "seed_generator" + type: DT_RESOURCE + } + output_arg { + name: "handle" + type: DT_VARIANT + } + attr { + name: "reshuffle_each_iteration" + type: "bool" + default_value { + b: true + } + } + attr { + name: "output_types" 
+ type: "list(type)" + has_minimum: true + minimum: 1 + } + attr { + name: "output_shapes" + type: "list(shape)" + has_minimum: true + minimum: 1 + } + is_stateful: true } op { name: "ShuffleDataset" diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 8d5e3e6e1ff..e3df3820c71 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -108,7 +108,7 @@ limitations under the License. #define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0 #define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0 -#define TF_GRAPH_DEF_VERSION 386 // Updated: 2020/4/29 +#define TF_GRAPH_DEF_VERSION 387 // Updated: 2020/4/30 // Checkpoint compatibility versions (the versions field in SavedSliceMeta). // diff --git a/tensorflow/core/util/use_cudnn.cc b/tensorflow/core/util/use_cudnn.cc index fc9988df5fd..d0157f8ad37 100644 --- a/tensorflow/core/util/use_cudnn.cc +++ b/tensorflow/core/util/use_cudnn.cc @@ -22,6 +22,24 @@ limitations under the License. namespace tensorflow { +bool CanUseCudnn() { + static bool is_enabled = [] { + bool is_enabled = true; + // TODO(b/155239286): Remove TF_USE_CUDNN after TF 2.3 is released. + Status status = + ReadBoolFromEnvVar("TF_USE_CUDNN", /*default_val=*/true, &is_enabled); + if (!status.ok()) { + LOG(ERROR) << status; + } + if (!is_enabled) { + LOG(WARNING) << "The environmental variable TF_USE_CUDNN is deprecated " + "and will be ignored in the future"; + } + return is_enabled; + }(); + return is_enabled; +} + #define ADD_BOOL_CUDNN_FLAG(func_name, flag_name, default_value) \ bool func_name() { \ bool value = default_value; \ @@ -32,7 +50,6 @@ namespace tensorflow { return value; \ } -ADD_BOOL_CUDNN_FLAG(CanUseCudnn, TF_USE_CUDNN, true); ADD_BOOL_CUDNN_FLAG(CudnnUseAutotune, TF_CUDNN_USE_AUTOTUNE, true); // Whether to auto-tuning Cudnn RNN forward and backward pass to pick // statistically the best cudnnRNNAlgo_t and cudnnMathType_t. 
diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go index f5bad688985..00f3d280b32 100644 --- a/tensorflow/go/op/wrappers.go +++ b/tensorflow/go/op/wrappers.go @@ -16989,6 +16989,17 @@ func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x return op.Output(0), op.Output(1), op.Output(2) } +// ShuffleAndRepeatDatasetAttr is an optional argument to ShuffleAndRepeatDataset. +type ShuffleAndRepeatDatasetAttr func(optionalAttr) + +// ShuffleAndRepeatDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value. +// If not specified, defaults to true +func ShuffleAndRepeatDatasetReshuffleEachIteration(value bool) ShuffleAndRepeatDatasetAttr { + return func(m optionalAttr) { + m["reshuffle_each_iteration"] = value + } +} + // Creates a dataset that shuffles and repeats elements from `input_dataset` // // pseudorandomly. @@ -17006,11 +17017,14 @@ func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x // should be repeated. The default is `-1`, which results in infinite repetition. 
// // -func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) { +func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleAndRepeatDatasetAttr) (handle tf.Output) { if scope.Err() != nil { return } attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes} + for _, a := range optional { + a(attrs) + } opspec := tf.OpSpec{ Type: "ShuffleAndRepeatDataset", Input: []tf.Input{ @@ -26814,7 +26828,7 @@ func Reverse(scope *Scope, tensor tf.Output, dims tf.Output) (output tf.Output) // // @tf.function // def foo(x, y): -// return = mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) +// return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) // // graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def() // ``` diff --git a/tensorflow/lite/core/api/flatbuffer_conversions.cc b/tensorflow/lite/core/api/flatbuffer_conversions.cc index 998b7d5fbf1..6c861151283 100644 --- a/tensorflow/lite/core/api/flatbuffer_conversions.cc +++ b/tensorflow/lite/core/api/flatbuffer_conversions.cc @@ -913,6 +913,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, case BuiltinOperator_SEGMENT_SUM: return kTfLiteOk; } + return kTfLiteError; } // NOLINT[readability/fn_size] } // namespace tflite diff --git a/tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn_init.cc b/tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn_init.cc deleted file mode 100644 index 70e3a2e8435..00000000000 --- a/tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn_init.cc +++ /dev/null @@ -1,52 +0,0 @@ -/* 
Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn_init.h" - -#include -#include -#include -#include - -#include "remote.h" // NOLINT -#include "rpcmem.h" // NOLINT -#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/soc_model.h" - -#ifdef __cplusplus -extern "C" { -#endif -// Version 1.17 -static const int kHexagonNNVersion = 136960; -#pragma weak remote_handle_control // Declare it as a weak symbol -void hexagon_nn_global_init() { - rpcmem_init(); - // Non-domains QoS invocation - struct remote_rpc_control_latency data; - data.enable = 1; - if (remote_handle_control) { // Check if API is available before invoking - remote_handle_control(DSPRPC_CONTROL_LATENCY, (void*)&data, sizeof(data)); - } -} - -void hexagon_nn_global_teardown() { rpcmem_deinit(); } - -bool hexagon_nn_is_device_supported() { - return tflite::delegates::getsoc_model().mode != UNSPECIFIED_MODE; -} - -int hexagon_nn_hexagon_interface_version() { return kHexagonNNVersion; } - -#ifdef __cplusplus -} -#endif diff --git a/tensorflow/lite/interpreter_test.cc b/tensorflow/lite/interpreter_test.cc index 4eccdf302a0..abd92ad563d 100644 --- a/tensorflow/lite/interpreter_test.cc +++ b/tensorflow/lite/interpreter_test.cc @@ -22,7 +22,6 @@ limitations under the License. 
#include #include #include "third_party/eigen3/Eigen/Core" -#include "tensorflow/lite/context.h" #include "tensorflow/lite/core/api/error_reporter.h" #include "tensorflow/lite/external_cpu_backend_context.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" @@ -749,10 +748,22 @@ TEST(BasicInterpreter, ThreeStepAllocate) { ASSERT_EQ(interpreter.SetOutputs({4}), kTfLiteOk); TfLiteQuantizationParams quantized; - char data[] = {1, 0, 0, 0, 12, 0, 0, 0, 15, 0, 0, 0, 'A', 'B', 'C'}; + + // String tensor with one string of length 3 + union { + char raw_bytes[15]; + struct { + int32_t num_strs; + int32_t offsets[2]; + char str_data[3]; + } tensor_data; + } data; + data.tensor_data = {1, {12, 15}, {'A', 'B', 'C'}}; + // Read only string tensor. ASSERT_EQ(interpreter.SetTensorParametersReadOnly(0, kTfLiteString, "", {1}, - quantized, data, 15), + quantized, data.raw_bytes, + sizeof(data.raw_bytes)), kTfLiteOk); // Read-write string tensor. ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteString, "", {1}, diff --git a/tensorflow/lite/java/BUILD b/tensorflow/lite/java/BUILD index c736c7c4f31..49c2136ffb4 100644 --- a/tensorflow/lite/java/BUILD +++ b/tensorflow/lite/java/BUILD @@ -348,6 +348,14 @@ filegroup( visibility = ["//visibility:public"], ) +filegroup( + name = "portable_gpu_tests", + srcs = [ + "src/test/java/org/tensorflow/lite/gpu/GpuDelegateTest.java", + ], + visibility = ["//visibility:public"], +) + filegroup( name = "libtensorflowlite_jni", srcs = select({ diff --git a/tensorflow/lite/java/src/test/java/org/tensorflow/lite/gpu/GpuDelegateTest.java b/tensorflow/lite/java/src/test/java/org/tensorflow/lite/gpu/GpuDelegateTest.java new file mode 100644 index 00000000000..b9cbc27052f --- /dev/null +++ b/tensorflow/lite/java/src/test/java/org/tensorflow/lite/gpu/GpuDelegateTest.java @@ -0,0 +1,57 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +package org.tensorflow.lite.gpu; + +import static com.google.common.truth.Truth.assertThat; + +import java.nio.ByteBuffer; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.tensorflow.lite.Interpreter; +import org.tensorflow.lite.TestUtils; + +/** Unit tests for {@link org.tensorflow.lite.gpu.GpuDelegate}. */ +@RunWith(JUnit4.class) +public final class GpuDelegateTest { + + private static final String MODEL_PATH = "tensorflow/lite/java/src/testdata/add.bin"; + private static final ByteBuffer MODEL_BUFFER = TestUtils.getTestFileAsBuffer(MODEL_PATH); + + @Test + public void testBasic() throws Exception { + try (GpuDelegate delegate = new GpuDelegate()) { + assertThat(delegate.getNativeHandle()).isNotEqualTo(0); + } + } + + @Test + public void testInterpreterWithGpu() throws Exception { + Interpreter.Options options = new Interpreter.Options(); + try (GpuDelegate delegate = new GpuDelegate(); + Interpreter interpreter = new Interpreter(MODEL_BUFFER, options.addDelegate(delegate))) { + float[] oneD = {1.23f, 6.54f, 7.81f}; + float[][] twoD = {oneD, oneD, oneD, oneD, oneD, oneD, oneD, oneD}; + float[][][] threeD = {twoD, twoD, twoD, twoD, twoD, twoD, twoD, twoD}; + float[][][][] fourD = {threeD, threeD}; + float[][][][] parsedOutputs = new float[2][8][8][3]; + interpreter.run(fourD, parsedOutputs); + float[] 
outputOneD = parsedOutputs[0][0][0]; + float[] expected = {3.69f, 19.62f, 23.43f}; + assertThat(outputOneD).usingTolerance(0.1f).containsExactly(expected).inOrder(); + } + } +} diff --git a/tensorflow/lite/kernels/BUILD b/tensorflow/lite/kernels/BUILD index ae49d798fa8..5b6fe4b5b21 100644 --- a/tensorflow/lite/kernels/BUILD +++ b/tensorflow/lite/kernels/BUILD @@ -338,6 +338,7 @@ cc_library( # Depend on ruy regardless of `tflite_with_ruy`. See the comment in # cpu_backend_gemm.h about why ruy is the generic path. "@ruy//ruy", + "@ruy//ruy:matrix", "@ruy//ruy:path", "@ruy//ruy/profiler:instrumentation", # We only need to depend on gemmlowp and Eigen when tflite_with_ruy @@ -525,6 +526,7 @@ cc_library( visibility = ["//visibility:private"], deps = [ ":cpu_backend_context", + ":cpu_backend_gemm", ":cpu_backend_threadpool", ":eigen_support", ":kernel_util", diff --git a/tensorflow/lite/kernels/cpu_backend_context.cc b/tensorflow/lite/kernels/cpu_backend_context.cc index 0fa4175973a..d6de9bf8d61 100644 --- a/tensorflow/lite/kernels/cpu_backend_context.cc +++ b/tensorflow/lite/kernels/cpu_backend_context.cc @@ -55,9 +55,6 @@ CpuBackendContext::CpuBackendContext() ruy_context_(new ruy::Context), gemmlowp_context_(new gemmlowp::GemmContext) { SetMaxNumThreads(kDefaultNumThreadpoolThreads); -#ifdef TFLITE_WITH_RUY_GEMV - ruy_context_->set_cache_policy(ruy::CachePolicy::kCacheLHSOnNarrowMul); -#endif } CpuBackendContext::~CpuBackendContext() {} diff --git a/tensorflow/lite/kernels/cpu_backend_gemm_params.h b/tensorflow/lite/kernels/cpu_backend_gemm_params.h index 66700ea9cdf..0040f40cd50 100644 --- a/tensorflow/lite/kernels/cpu_backend_gemm_params.h +++ b/tensorflow/lite/kernels/cpu_backend_gemm_params.h @@ -29,6 +29,17 @@ namespace cpu_backend_gemm { // Matrix storage order: column-major or row-major. 
enum class Order { kColMajor, kRowMajor }; +enum class CachePolicy : std::uint8_t { + kNeverCache, + kCacheIfLargeSpeedup, + kAlwaysCache, +}; + +inline CachePolicy DefaultCachePolicy(bool is_constant_data) { + return is_constant_data ? CachePolicy::kCacheIfLargeSpeedup + : CachePolicy::kNeverCache; +} + // MatrixParams encapsulates the parameters that Gemm needs about each // matrix, besides the buffer data pointer. // Compare to ruy::Matrix, which also encapsulates the data pointer. @@ -47,10 +58,13 @@ struct MatrixParams { // The zero_point, i.e. which Scalar value is to be interpreted as zero. // When Scalar is floating-point, this must be 0. Scalar zero_point = 0; - // Indicate whether the underlying data will remain unchanged for - // some period of time. Defaults to false, but should be set to true - // for unchanging data (e.g. weights buffers in many cases) - bool cacheable = false; + // When the data pointed to by this matrix is constant data, so that it is + // valid to assume that equality of pointers implies equality of data, + // a CachePolicy may be used instead of the default kNeverCache, + // which will enable ruy to take advantage of this constancy of the data to + // cache the packing work, which can be a large speedup in matrix*vector + // and other narrow shapes. + CachePolicy cache_policy = CachePolicy::kNeverCache; }; // Enumeration of broad categories of Gemm. diff --git a/tensorflow/lite/kernels/cpu_backend_gemm_ruy.h b/tensorflow/lite/kernels/cpu_backend_gemm_ruy.h index a5bcccda331..b441628a67b 100644 --- a/tensorflow/lite/kernels/cpu_backend_gemm_ruy.h +++ b/tensorflow/lite/kernels/cpu_backend_gemm_ruy.h @@ -16,6 +16,7 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_RUY_H_ #define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_RUY_H_ +#include "ruy/matrix.h" // from @ruy #include "ruy/path.h" // from @ruy #include "ruy/ruy.h" // from @ruy #include "tensorflow/lite/kernels/cpu_backend_context.h" @@ -25,6 +26,20 @@ namespace tflite { namespace cpu_backend_gemm { namespace detail { +inline ruy::CachePolicy ToRuyCachePolicy(CachePolicy cache_policy) { + switch (cache_policy) { + case CachePolicy::kNeverCache: + return ruy::CachePolicy::kNeverCache; + case CachePolicy::kCacheIfLargeSpeedup: + return ruy::CachePolicy::kCacheIfLargeSpeedup; + case CachePolicy::kAlwaysCache: + return ruy::CachePolicy::kAlwaysCache; + default: + TFLITE_DCHECK(false); + return ruy::CachePolicy::kNeverCache; + } +} + template void MakeRuyMatrix(const MatrixParams& params, DataPointer data_ptr, ruy::Matrix* dst) { @@ -37,7 +52,9 @@ void MakeRuyMatrix(const MatrixParams& params, DataPointer data_ptr, // It does care whether we assign to it a Scalar* or a const Scalar*. dst->set_data(data_ptr); dst->set_zero_point(params.zero_point); - dst->set_cacheable(params.cacheable); +#ifdef TFLITE_WITH_RUY_GEMV + dst->set_cache_policy(ToRuyCachePolicy(params.cache_policy)); +#endif } template diff --git a/tensorflow/lite/kernels/fully_connected.cc b/tensorflow/lite/kernels/fully_connected.cc index 5faf13303d8..62a4ede9a06 100644 --- a/tensorflow/lite/kernels/fully_connected.cc +++ b/tensorflow/lite/kernels/fully_connected.cc @@ -22,6 +22,7 @@ limitations under the License. 
#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" +#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/optimized/sparse_ops/fully_connected.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" diff --git a/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.cc b/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.cc index 32584fcd027..07f3117dac7 100644 --- a/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.cc +++ b/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.cc @@ -1042,7 +1042,7 @@ void NeonCpuBackendGemm(const int8_t* input, const int32_t* bias, lhs_params.order = cpu_backend_gemm::Order::kRowMajor; lhs_params.rows = n_output; lhs_params.cols = n_input; - lhs_params.cacheable = true; + lhs_params.cache_policy = cpu_backend_gemm::CachePolicy::kCacheIfLargeSpeedup; MatrixParams rhs_params; rhs_params.order = cpu_backend_gemm::Order::kColMajor; diff --git a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h index 6e1f805f7f4..5f183de7269 100644 --- a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h +++ b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h @@ -286,13 +286,15 @@ inline void FullyConnected( rhs_params.order = cpu_backend_gemm::Order::kColMajor; rhs_params.rows = input_rows; rhs_params.cols = input_shape.FlatSize() / input_rows; - rhs_params.cacheable = params.rhs_cacheable; + rhs_params.cache_policy = + cpu_backend_gemm::DefaultCachePolicy(params.rhs_cacheable); TFLITE_DCHECK_EQ(input_shape.FlatSize(), rhs_params.rows * rhs_params.cols); cpu_backend_gemm::MatrixParams lhs_params; lhs_params.order = cpu_backend_gemm::Order::kRowMajor; lhs_params.cols = weights_shape.Dims(dims_count - 1); 
lhs_params.rows = FlatSizeSkipDim(weights_shape, dims_count - 1); - lhs_params.cacheable = params.lhs_cacheable; + lhs_params.cache_policy = + cpu_backend_gemm::DefaultCachePolicy(params.lhs_cacheable); cpu_backend_gemm::MatrixParams dst_params; dst_params.order = cpu_backend_gemm::Order::kColMajor; dst_params.rows = output_shape.Dims(output_shape.DimensionsCount() - 1); @@ -345,13 +347,15 @@ inline void FullyConnected( lhs_params.cols = filter_cols; lhs_params.order = cpu_backend_gemm::Order::kRowMajor; lhs_params.zero_point = -filter_offset; - lhs_params.cacheable = params.lhs_cacheable; + lhs_params.cache_policy = + cpu_backend_gemm::DefaultCachePolicy(params.lhs_cacheable); cpu_backend_gemm::MatrixParams rhs_params; rhs_params.rows = filter_cols; rhs_params.cols = batches; rhs_params.order = cpu_backend_gemm::Order::kColMajor; rhs_params.zero_point = -input_offset; - rhs_params.cacheable = params.rhs_cacheable; + rhs_params.cache_policy = + cpu_backend_gemm::DefaultCachePolicy(params.rhs_cacheable); cpu_backend_gemm::MatrixParams dst_params; dst_params.rows = filter_rows; dst_params.cols = batches; @@ -404,13 +408,15 @@ inline void FullyConnected( lhs_params.cols = accum_depth; lhs_params.order = cpu_backend_gemm::Order::kRowMajor; lhs_params.zero_point = -filter_offset; - lhs_params.cacheable = params.lhs_cacheable; + lhs_params.cache_policy = + cpu_backend_gemm::DefaultCachePolicy(params.lhs_cacheable); cpu_backend_gemm::MatrixParams rhs_params; rhs_params.rows = accum_depth; rhs_params.cols = batches; rhs_params.order = cpu_backend_gemm::Order::kColMajor; rhs_params.zero_point = -input_offset; - rhs_params.cacheable = params.rhs_cacheable; + rhs_params.cache_policy = + cpu_backend_gemm::DefaultCachePolicy(params.rhs_cacheable); cpu_backend_gemm::MatrixParams dst_params; dst_params.rows = output_depth; dst_params.cols = batches; diff --git a/tensorflow/lite/micro/kernels/BUILD b/tensorflow/lite/micro/kernels/BUILD index 1ba500fd61b..a1003a84201 100644 --- 
a/tensorflow/lite/micro/kernels/BUILD +++ b/tensorflow/lite/micro/kernels/BUILD @@ -69,7 +69,6 @@ cc_library( "xtensa_hifimini/quantize.cc", "xtensa_hifimini/softmax.cc", "xtensa_hifimini/svdf.cc", - "xtensa_hifimini/utils.h", ], }), hdrs = ["micro_ops.h"], diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/README.md b/tensorflow/lite/micro/kernels/cmsis-nn/README.md index 4107ba466db..93da68b130f 100644 --- a/tensorflow/lite/micro/kernels/cmsis-nn/README.md +++ b/tensorflow/lite/micro/kernels/cmsis-nn/README.md @@ -48,7 +48,17 @@ cp tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/DSP/Include/\ arm_math.h mbed-os/cmsis/TARGET_CORTEX_M/arm_math.h ``` -This issue will be resolved soon. Now type +There's also a dependency to an old cmsis_gcc.h, which you can fix with the +following: + +``` +tensorflow/lite/micro/tools/make/downloads/cmsis/CMSIS/Core/Include/\ +cmsis_gcc.h mbed-os/cmsis/TARGET_CORTEX_M/cmsis_gcc.h +``` + +This issue will be resolved soon. + +Now type: ``` mbed compile -m DISCO_F746NG -t GCC_ARM diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cc b/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cc index c174390fc88..a12f628e721 100644 --- a/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cc +++ b/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cc @@ -145,7 +145,7 @@ TfLiteStatus AverageEvalInt8(TfLiteContext* context, const TfLiteNode* node, ARM_MATH_SUCCESS); #else #pragma message( \ - "CMSIS-NN optimization for depthwise_conv not available for this target. Using reference kernel.") + "CMSIS-NN optimization for avg_pool not available for this target. 
Using reference kernel.") PoolParams op_params; op_params.stride_height = params->stride_height; @@ -165,8 +165,8 @@ TfLiteStatus AverageEvalInt8(TfLiteContext* context, const TfLiteNode* node, } void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, OpData* data, - const TfLiteTensor* input, TfLiteTensor* output) { + TfLitePoolParams* params, OpData* data, TfLiteTensor* input, + TfLiteTensor* output) { float activation_min, activation_max; CalculateActivationRange(params->activation, &activation_min, &activation_max); @@ -187,7 +187,7 @@ void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, - const TfLiteTensor* input, TfLiteTensor* output) { + TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min, activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); @@ -206,6 +206,74 @@ void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node, GetTensorData(output)); } +TfLiteStatus MaxEvalInt8(TfLiteContext* context, const TfLiteNode* node, + const TfLitePoolParams* params, const OpData* data, + TfLiteTensor* input, TfLiteTensor* output) { + int32_t activation_min, activation_max; + (void)CalculateActivationRangeQuantized(context, params->activation, output, + &activation_min, &activation_max); + + TFLITE_DCHECK_LE(activation_min, activation_max); + +#if defined(__ARM_FEATURE_DSP) + RuntimeShape input_shape = GetTensorShape(input); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + + RuntimeShape output_shape = GetTensorShape(output); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + const int depth = MatchingDim(input_shape, 3, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = 
output_shape.Dims(2); + const int stride_height = params->stride_height; + const int stride_width = params->stride_width; + + const int filter_height = params->filter_height; + const int filter_width = params->filter_width; + const int padding_height = data->padding.height; + const int padding_width = data->padding.width; + + int16_t* scratch_buffer = nullptr; + + auto* buffer_idx = reinterpret_cast(node->user_data); + + if (*buffer_idx > -1) { + void* raw = context->GetScratchBuffer(context, *buffer_idx); + scratch_buffer = reinterpret_cast(raw); + } + + TF_LITE_ENSURE_EQ( + context, + arm_max_pool_s8_opt(input_height, input_width, output_height, + output_width, stride_height, stride_width, + filter_height, filter_width, padding_height, + padding_width, activation_min, activation_max, depth, + GetTensorData(input), scratch_buffer, + GetTensorData(output)), + ARM_MATH_SUCCESS); +#else +#pragma message( \ + "CMSIS-NN optimization for max_pool not available for this target. Using reference kernel.") + + PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data->padding.height; + op_params.padding_values.width = data->padding.width; + op_params.quantized_activation_min = activation_min; + op_params.quantized_activation_max = activation_max; + reference_integer_ops::MaxPool( + op_params, GetTensorShape(input), GetTensorData(input), + GetTensorShape(output), GetTensorData(output)); + +#endif + return kTfLiteOk; +} + } // namespace void* Init(TfLiteContext* context, const char* buffer, size_t length) { @@ -278,7 +346,8 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast(node->builtin_data); OpData data; - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* input = 
&context->tensors[flatbuffers::EndianScalar( + node->inputs->data[kInputTensor])]; TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, &data)); @@ -290,6 +359,9 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteUInt8: MaxEvalQuantizedUInt8(context, node, params, &data, input, output); break; + case kTfLiteInt8: + MaxEvalInt8(context, node, params, &data, input, output); + break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); diff --git a/tensorflow/lite/micro/kernels/conv.cc b/tensorflow/lite/micro/kernels/conv.cc index 5d1418a68b1..7334cf13e49 100644 --- a/tensorflow/lite/micro/kernels/conv.cc +++ b/tensorflow/lite/micro/kernels/conv.cc @@ -33,7 +33,7 @@ constexpr int kInputTensor = 0; constexpr int kFilterTensor = 1; constexpr int kBiasTensor = 2; constexpr int kOutputTensor = 0; -constexpr int kMaxChannels = 256; +constexpr int kMaxChannels = 1024; // Conv is quantized along dimension 0: // https://www.tensorflow.org/lite/performance/quantization_spec diff --git a/tensorflow/lite/micro/kernels/depthwise_conv.cc b/tensorflow/lite/micro/kernels/depthwise_conv.cc index 5d76642d37d..8618646a4ea 100644 --- a/tensorflow/lite/micro/kernels/depthwise_conv.cc +++ b/tensorflow/lite/micro/kernels/depthwise_conv.cc @@ -35,7 +35,7 @@ constexpr int kInputTensor = 0; constexpr int kFilterTensor = 1; constexpr int kBiasTensor = 2; constexpr int kOutputTensor = 0; -constexpr int kMaxChannels = 256; +constexpr int kMaxChannels = 1024; // Depthwise conv is quantized along dimension 3: // https://www.tensorflow.org/lite/performance/quantization_spec diff --git a/tensorflow/lite/micro/kernels/fully_connected.cc b/tensorflow/lite/micro/kernels/fully_connected.cc index 54c923cd314..6156ddb7ab9 100644 --- a/tensorflow/lite/micro/kernels/fully_connected.cc +++ b/tensorflow/lite/micro/kernels/fully_connected.cc @@ -48,7 
+48,7 @@ constexpr int kBiasTensor = 2; constexpr int kOutputTensor = 0; TfLiteStatus CalculateOpData(TfLiteContext* context, - TfLiteFullyConnectedParams* params, + TfLiteFusedActivation activation, TfLiteType data_type, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, @@ -62,7 +62,7 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, QuantizeMultiplier(real_multiplier, &data->output_multiplier, &exponent); data->output_shift = -exponent; TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, + context, activation, output, &data->output_activation_min, &data->output_activation_max)); } return status; @@ -85,19 +85,18 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { } TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, - TfLiteFullyConnectedParams* params, OpData* data, - const TfLiteTensor* input, + const OpData& data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output) { FullyConnectedParams op_params; op_params.input_offset = -input->params.zero_point; op_params.weights_offset = -filter->params.zero_point; op_params.output_offset = output->params.zero_point; - op_params.output_multiplier = data->output_multiplier; + op_params.output_multiplier = data.output_multiplier; // TODO(b/138810107): Figure out whether output shift should be inverted - op_params.output_shift = -data->output_shift; - op_params.quantized_activation_min = data->output_activation_min; - op_params.quantized_activation_max = data->output_activation_max; + op_params.output_shift = -data.output_shift; + op_params.quantized_activation_min = data.output_activation_min; + op_params.quantized_activation_max = data.output_activation_max; reference_integer_ops::FullyConnected( op_params, GetTensorShape(input), GetTensorData(input), @@ -108,8 +107,7 @@ TfLiteStatus 
EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, } TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteFullyConnectedParams* params, OpData* data, - const TfLiteTensor* input, + const OpData& data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output) { const int32_t input_offset = -input->params.zero_point; @@ -120,11 +118,11 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, op_params.input_offset = input_offset; op_params.weights_offset = filter_offset; op_params.output_offset = output_offset; - op_params.output_multiplier = data->output_multiplier; + op_params.output_multiplier = data.output_multiplier; // Legacy ops used mixed left and right shifts. Now all are +ve-means-left. - op_params.output_shift = -data->output_shift; - op_params.quantized_activation_min = data->output_activation_min; - op_params.quantized_activation_max = data->output_activation_max; + op_params.output_shift = -data.output_shift; + op_params.quantized_activation_min = data.output_activation_min; + op_params.quantized_activation_max = data.output_activation_max; #define TF_LITE_FULLY_CONNECTED(output_data_type) \ reference_ops::FullyConnected( \ @@ -149,11 +147,11 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, } TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteFullyConnectedParams* params, OpData* data, + TfLiteFusedActivation activation, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output) { float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, + CalculateActivationRange(activation, &output_activation_min, &output_activation_max); tflite::FullyConnectedParams op_params; op_params.float_activation_min = output_activation_min; @@ -167,8 +165,9 @@ TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, } 
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = - reinterpret_cast(node->builtin_data); + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto* params = + static_cast(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor); @@ -176,23 +175,21 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteType data_type = input->type; - OpData local_data_object; - OpData* data = &local_data_object; - TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, data_type, input, - filter, bias, output, data)); + OpData data; + TF_LITE_ENSURE_STATUS(CalculateOpData(context, params->activation, data_type, + input, filter, bias, output, &data)); // Checks in Prepare ensure input, output and filter types are all the same. switch (input->type) { case kTfLiteFloat32: - return EvalFloat(context, node, params, data, input, filter, bias, + return EvalFloat(context, node, params->activation, input, filter, bias, output); case kTfLiteInt8: - return EvalQuantizedInt8(context, node, params, data, input, filter, bias, + return EvalQuantizedInt8(context, node, data, input, filter, bias, output); case kTfLiteUInt8: - return EvalQuantized(context, node, params, data, input, filter, bias, - output); + return EvalQuantized(context, node, data, input, filter, bias, output); default: TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc index 7a31eb77491..03eba5082af 100644 --- a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc +++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc @@ -25,7 +25,6 @@ limitations under the License. 
#include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/padding.h" #include "tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h" -#include "tensorflow/lite/micro/kernels/xtensa_hifimini/utils.h" namespace tflite { namespace ops { @@ -66,7 +65,7 @@ void ConvPerChannel(const ConvParams& params, const int32* output_multiplier, const int output_width = output_shape.Dims(2); const int output_depth = output_shape.Dims(3); - ae_p24x2s input_offset_24x2 = AE_CONVERT_INT32_24x2(input_offset); + ae_p24x2s input_offset_24x2 = AE_MOVPA24(input_offset); ae_q56s output_offset_56 = AE_CVTQ48A32S(output_offset); ae_q56s output_activation_min_56 = AE_CVTQ48A32S(output_activation_min); ae_q56s output_activation_max_56 = AE_CVTQ48A32S(output_activation_max); @@ -150,9 +149,6 @@ void ConvPerChannel(const ConvParams& params, const int32* output_multiplier, acc_24x2, output_multiplier[out_channel], output_shift[out_channel]); - // Shift from 48bit aligned to 32bit: - acc_56 = AE_Q56S_SLAI(acc_56, 16); - // Add output offset, cap activation, and assign to the output: acc_56 = AE_ADDQ56(acc_56, output_offset_56); acc_56 = AE_MINQ56S(acc_56, output_activation_max_56); @@ -178,7 +174,7 @@ inline void Conv1x32Input32x32Filter( const RuntimeShape& filter_shape, const int8* filter_data, const RuntimeShape& bias_shape, const int32* bias_data, const RuntimeShape& output_shape, int8* output_data) { - ae_p24x2s input_offset_24x2 = AE_CONVERT_INT32_24x2(input_offset); + ae_p24x2s input_offset_24x2 = AE_MOVPA24(input_offset); ae_q56s output_offset_56 = AE_CVTQ48A32S(output_offset); ae_q56s output_activation_max_56 = AE_CVTQ48A32S(quantized_activation_max); ae_q56s output_activation_min_56 = AE_CVTQ48A32S(quantized_activation_min); @@ -227,13 +223,10 @@ inline void Conv1x32Input32x32Filter( acc_56 = AE_Q56S_SLAI(acc_56, 8); ae_p24x2s acc_24x2 = AE_TRUNCP24Q48(acc_56); - // Apply quantized multiplier and accumulate result at 48bit - // alignment: + // Apply 
quantized multiplier and accumulate result at 48bit alignment. + // Convert the (unsigned) 32-bit multiplier down to a 24-bit multiplier. acc_56 = micro::xtensa::hifimini::MultiplyByQuantizedMultiplier( - acc_24x2, output_multiplier[ch], output_shift[ch]); - - // Shift from 48bit aligned to 32bit: - acc_56 = AE_Q56S_SLAI(acc_56, 16); + acc_24x2, output_multiplier[ch] >> 8, output_shift[ch]); // Add output offset, cap activation, and assign to the output: acc_56 = AE_ADDQ56(acc_56, output_offset_56); diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc index 4781f70b1fa..75bc29efdfc 100644 --- a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc +++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc @@ -25,7 +25,6 @@ limitations under the License. #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/padding.h" #include "tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h" -#include "tensorflow/lite/micro/kernels/xtensa_hifimini/utils.h" namespace tflite { namespace ops { @@ -69,7 +68,7 @@ inline void DepthwiseConvPerChannel( const int output_width = output_shape.Dims(2); const int output_depth = output_shape.Dims(3); - ae_p24x2s input_offset_24x2 = AE_CONVERT_INT32_24x2(input_offset); + ae_p24x2s input_offset_24x2 = AE_MOVPA24(input_offset); ae_q56s output_offset_56 = AE_CVTQ48A32S(output_offset); ae_q56s output_activation_min_56 = AE_CVTQ48A32S(output_activation_min); ae_q56s output_activation_max_56 = AE_CVTQ48A32S(output_activation_max); @@ -114,14 +113,14 @@ inline void DepthwiseConvPerChannel( // shift into 24bit space. Note: value is duplicated in the HH // and LL register - but all calculations are done on the HH // side. 
- ae_p24x2s input_val_24x2 = AE_CONVERT_INT32_24x2(input_val); + ae_p24x2s input_val_24x2 = AE_MOVPA24(input_val); // Add input offset (24bit aligned): input_val_24x2 = AE_P24S_ADDS_P24X2S(input_val_24x2, input_offset_24x2); // Load filter 8bit value into 24bit alignment: - ae_p24x2s filter_val_24x2 = AE_CONVERT_INT32_24x2(filter_val); + ae_p24x2s filter_val_24x2 = AE_MOVPA24(filter_val); // Multiply and accumulate the HH side of each 24x24 PR // register: @@ -150,9 +149,6 @@ inline void DepthwiseConvPerChannel( acc_24x2, output_multiplier[output_channel], output_shift[output_channel]); - // Shift from 48bit aligned to 32bit: - acc_56 = AE_Q56S_SLAI(acc_56, 16); - // Add output offset, cap activation, and assign to the output: acc_56 = AE_ADDQ56(acc_56, output_offset_56); acc_56 = AE_MINQ56S(acc_56, output_activation_max_56); @@ -181,9 +177,10 @@ inline void DepthwiseConv4x32MatchingInputAndFilter( const RuntimeShape& filter_shape, const int8* filter_data, const RuntimeShape& bias_shape, const int32* bias_data, const RuntimeShape& output_shape, int8* output_data) { - const int32_t mult = output_multiplier[0]; + // Convert the (unsigned) 32-bit multiplier down to a 24-bit multiplier. 
+ const int32_t mult = output_multiplier[0] >> 8; const int32_t shift = output_shift[0]; - ae_p24x2s input_offset_24x2 = AE_CONVERT_INT32_24x2(input_offset); + ae_p24x2s input_offset_24x2 = AE_MOVPA24(input_offset); ae_q56s output_offset_56 = AE_CVTQ48A32S(output_offset); ae_q56s output_activation_min_56 = AE_CVTQ48A32S(quantized_activation_min); ae_q56s output_activation_max_56 = AE_CVTQ48A32S(quantized_activation_max); @@ -270,10 +267,6 @@ inline void DepthwiseConv4x32MatchingInputAndFilter( block_1_acc = micro::xtensa::hifimini::MultiplyByQuantizedMultiplier( acc_24x2_1, mult, shift); - // Shift from 48bit aligned to 32bit: - block_0_acc = AE_Q56S_SLAI(block_0_acc, 16); - block_1_acc = AE_Q56S_SLAI(block_1_acc, 16); - // Add output offset, cap activation, and assign to the output: block_0_acc = AE_ADDQ56(block_0_acc, output_offset_56); block_1_acc = AE_ADDQ56(block_1_acc, output_offset_56); diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h b/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h index 4ffb3653f50..918192c4d8f 100644 --- a/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h +++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h @@ -23,7 +23,6 @@ limitations under the License. #include #include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/kernels/xtensa_hifimini/utils.h" namespace tflite { namespace ops { @@ -31,80 +30,9 @@ namespace micro { namespace xtensa { namespace hifimini { -// -// Multiply 32bit value by a quantized multiplier (w/ shift) and returns a 48bit -// aligned value in the QR register. -// -inline ae_q56s MultiplyByQuantizedMultiplier(int32_t x, - int32_t quantized_multiplier, - int shift) { - // These boolean factors will carry an additional 2^8 (e.g 256) factor - // throughout the equation to cover the missing 8 bits of precision when a - // 32bit integer is outside the bounds of INT24. 
The additional scaling factor - // will be adjusted after the final multiplication in this method. - // - // The Q-notation comments in this method describe the calculations that take - // place when both |x| and the shifted value of |1| overflow the INT24 limits. - bool x_exceeds_24bits = (x <= INT24_MIN || x >= INT24_MAX); - bool shift_exceeds_24bits = false; - - // Q31.0 -> Q23.0 / 2^8 - ae_p24x2s x_24x2 = AE_CONVERT_INT32_24x2(x); - - if (shift > 0) { - int shifted = 1 << shift; - if (shifted <= INT24_MIN || shifted >= INT24_MAX) { - shift_exceeds_24bits = true; - } - - // Load the shifted value into the PR register: - // Q31.0 -> Q23.0 / 2^8 - ae_p24x2s shifted_24x2 = AE_CONVERT_INT32_24x2(shifted); - - // (Q23.0 / 2^8) * (Q23.0 / 2^8) = Q47.0 / 2^16 - ae_q56s sum_56 = AE_MULP24S_HH(x_24x2, shifted_24x2); - - // Shift left into 24bit space: - // ((Q47.0 / 2^16) << 24) = Q23.24 / 2^16 - sum_56 = AE_Q56S_SLAI(sum_56, 24); - - // Truncate and place on the PR register: - // (Q23.24 / 2^16) -> Q23.0 / 2^16 - x_24x2 = AE_TRUNCP24Q48(sum_56); - } - - // Load the quantized multiplier into the PR register. - // NOTE: This method assumes that this param has been calculated for 24bit - // space - not 32bits. - // Q0.31 -> Q0.23 - ae_p24x2s quantized_multiplier_24x2 = - AE_CONVERT_INT32_24x2(quantized_multiplier); - - // Adjust for the additional 8 bits of lost precision throughout this - // function: - int shift_amount = 23; - if (x_exceeds_24bits) { - shift_amount = shift_amount - 8; - } - if (shift_exceeds_24bits) { - shift_amount = shift_amount - 8; - } - - // Find the product of x and the quantized_multiplier and right shift - // to 48bit aligned. - // (Q23.0 / 2^16) * Q23.0 = Q47.0 / 2^16 - // (Q47.0 / 2^16) >> 7 = Q47.0 - ae_q56s result_56 = AE_MULP24S_HH(x_24x2, quantized_multiplier_24x2); - if (shift_amount > 0) { - result_56 = AE_Q56S_SRA(result_56, shift_amount); - } - - if (shift < 0) { - // Handle any negative shift directly on the 48 bit value. 
- result_56 = AE_Q56S_SRA(result_56, -shift); - } - return result_56; -} +// INT24 MIN/MAX +#define INT24_MIN -8388608 +#define INT24_MAX 8388607 // // Multiply 24bit value by a quantized multiplier (w/ shift) and returns a 48bit @@ -113,62 +41,62 @@ inline ae_q56s MultiplyByQuantizedMultiplier(int32_t x, inline ae_q56s MultiplyByQuantizedMultiplier(ae_p24x2s x_24x2, int32_t quantized_multiplier, int shift) { - // NOTE: x_24x2 = Q23.0 - - // This is an optimized version of a 32 bit MultiplyByQuantizedMultiplier - // operation of TFLite. Sometimes, the shifted value of |x_24x2| can exceed - // the limits of INT24, which requires |AE_CONVERT_INT32_24x2()| to load the - // left-most 24 bits of a 32bit integer. When this occurs, all Q values here - // carry an additional division of 2^8 to account for this loss in precision. - // This division will be applied to the final shift after multiplication. + // A value with 1 sign bit, N integer bits and M fractional bits is + // represented as QN+1.M since the sign bit is included in the integer bits. + // + // The Q notation in this method explains the values represented in each + // variable, along with an implicit division since the quantized_multiplier + // represents a value between 0.5 and 1.0 (Q1.X-1 where X is the bit precision + // of the type). // - // The Q-notation comments in this method describe the calculations that take - // place when both |x| and the shifted value of |1| overflow the INT24 limits. 
- bool shift_exceeds_24bits = false; - - ae_p24x2s x_shifted_24x2 = x_24x2; - if (shift > 0) { - int shifted = 1 << shift; - if (shifted <= INT24_MIN || shifted >= INT24_MAX) { - shift_exceeds_24bits = true; - } - // Load the shifted value into the PR register: - // Q31.0 -> Q23.0 / 2^8 - ae_p24x2s shifted_24x2 = AE_CONVERT_INT32_24x2(shifted); - - // Q23.0 * (Q23.0 / 2^8) = Q47.0 / 2^8 - ae_q56s sum_56 = AE_MULP24S_HH(x_24x2, shifted_24x2); - - // Shift left into 24bit space: - // ((Q47.0 / 2^8) << 24) = Q23.24 / 2^8 - sum_56 = AE_Q56S_SLAI(sum_56, 24); - - // Truncate and place on the PR register: - // (Q23.24 / 2^8) -> Q23.0 / 2^8 - x_shifted_24x2 = AE_ROUNDSP24Q48SYM(sum_56); - } - // Load the quantized multiplier into the PR register. // NOTE: This method assumes that this param has been calculated for 24bit // space - not 32bits. - // Q0.31 -> Q0.23 - ae_p24x2s quantized_multiplier_24x2 = - AE_CONVERT_INT32_24x2(quantized_multiplier); + // Q32.0 / 2^23 -> Q24.0 / 2^23 representing a Q1.23 multiplier. + ae_p24x2s quantized_multiplier_24x2 = AE_MOVPA24(quantized_multiplier); + // Shift right by 23 - 16 bits minus the specified shift. This is because we + // keep 16 fractional bits until the end to perform rounding. Subtract shift + // since shift is a left shift, and the 23-16 is a right shift. + int shift_amount = 7 - shift; - // Find the product of x and the quantized_multiplier and right shift - // to 48bit aligned. - // NOTE: Adjust for the additional 8 bits of lost precision throughout this - // function: - // (Q23.0 / 2^8) * Q23.0 = Q47.0 / 2^8 - // (Q47.0 / 2^8) >> 7 = Q47.0 - ae_q56s result = AE_MULP24S_HH(x_shifted_24x2, quantized_multiplier_24x2); - result = AE_Q56S_SRA(result, shift_exceeds_24bits ? 15 : 23); + // Find the product of x and the quantized_multiplier. 
+ // Q24.0 / 2^23 * Q24.0 = Q48.0 / 2^23 + // Q48.0 / 2^23 >> 7 = Q48.0 / 2^16 + ae_q56s result_56 = AE_MULP24S_HH(x_24x2, quantized_multiplier_24x2); - if (shift < 0) { - // Handle any negative shift directly on the 48 bit value. - result = AE_Q56S_SRA(result, -shift); + // Shift right if shift amount is positive, left if shift amount is negative. + if (shift_amount >= 0) { + result_56 = AE_Q56S_SRA(result_56, shift_amount); + } else { + result_56 = AE_Q56S_SLA(result_56, -shift_amount); } - return result; + + // Round off the bottom 16 bits. + // Q48.0 / 2^16 -> Q32.0 aligned to 48 bits. + result_56 = AE_ROUNDSQ32SYM(result_56); + return result_56; +} + +// +// Multiply 32bit value by a quantized multiplier (w/ shift) and returns a 48bit +// aligned value in the QR register. +// +inline ae_q56s MultiplyByQuantizedMultiplier(int32_t x, + int32_t quantized_multiplier, + int shift) { + // Convert x into a 2x24bit PR register file. If x is outside the numerical + // limits of a 24bit integer, the "fractional" or lower 8bits are discarded. + // If x is within the range of a 24 bit integer, the "signed" or upper 8bits + // are discarded. + ae_p24x2s x_24x2; + if (x > INT24_MIN && x < INT24_MAX) { + x_24x2 = AE_MOVPA24(x); + } else { + x_24x2 = static_cast(*reinterpret_cast(&x)); + shift += 8; + } + + return MultiplyByQuantizedMultiplier(x_24x2, quantized_multiplier, shift); } // @@ -193,6 +121,8 @@ inline void QuantizeMultiplier(float multiplier, int32_t* quantized_multiplier, } TFLITE_CHECK_LE(q_fixed, INT24_MAX); + // Ensure shift does not exceed 24-bit range. 
+ TFLITE_CHECK_LE(*shift, 23); if (*shift < -23) { *shift = 0; q_fixed = 0; diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc index 7a535120216..c2c2c86fe81 100644 --- a/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc +++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc @@ -25,7 +25,6 @@ limitations under the License. #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h" -#include "tensorflow/lite/micro/kernels/xtensa_hifimini/utils.h" namespace tflite { namespace ops { @@ -108,9 +107,6 @@ inline void FullyConnected( sum_56 = MultiplyByQuantizedMultiplier(sum_24x2, output_multiplier, output_shift); - // Align from 48bit to 32bit on the QR register: - sum_56 = AE_Q56S_SLAI(sum_56, 16); - // Add output_offset and cap min/max values: sum_56 = AE_ADDQ56(sum_56, output_offset_56); sum_56 = AE_MINQ56S(sum_56, output_activation_max_56); diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc index 0708ee7f973..2177bf1c363 100644 --- a/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc +++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc @@ -22,7 +22,6 @@ limitations under the License. 
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h" -#include "tensorflow/lite/micro/kernels/xtensa_hifimini/utils.h" namespace tflite { namespace ops { @@ -43,7 +42,7 @@ void AffineQuantize(int scale_multiplier, const ae_p16x2s* input_data_ptr = (const ae_p16x2s*)(input_data - 2); - ae_p24x2s scale_multiplier_24x2 = AE_CONVERT_INT32_24x2(scale_multiplier); + ae_p24x2s scale_multiplier_24x2 = AE_MOVPA24(scale_multiplier); int iters = flat_size / 2; for (int i = 0; i < iters; i++) { diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc index 1847a4e88e8..2b14bedc01f 100644 --- a/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc +++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc @@ -25,8 +25,6 @@ limitations under the License. #include "tensorflow/lite/kernels/op_macros.h" #include "tensorflow/lite/micro/kernels/activation_utils.h" #include "tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h" -#include "tensorflow/lite/micro/kernels/xtensa_hifimini/utils.h" -#include "tensorflow/lite/micro/micro_utils.h" namespace tflite { namespace ops { @@ -99,7 +97,7 @@ void EvalIntegerSVDF( ae_q56s output_int16_max_56 = AE_CVTQ48A32S(INT16_MAX); ae_q56s output_int16_min_56 = AE_CVTQ48A32S(INT16_MIN); - ae_p24x2s input_zp_24x2 = AE_CONVERT_INT32_24x2(input_zp); + ae_p24x2s input_zp_24x2 = AE_MOVPA24(input_zp); for (int b = 0; b < n_batch; b++) { const int8_t* weight_feature_ptr = weight_feature - 2; @@ -140,8 +138,6 @@ void EvalIntegerSVDF( tflite::ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier( dot_prod_24x2, scale_1_a, scale_1_b); - // Align from 48bit to 32bit on the QR register - dot_prod_56 = AE_Q56S_SLAI(dot_prod_56, 16); // Cap min/max and convert to int32: dot_prod_56 = AE_MAXQ56S(dot_prod_56, output_int16_min_56); dot_prod_56 = 
AE_MINQ56S(dot_prod_56, output_int16_max_56); @@ -232,8 +228,6 @@ void EvalIntegerSVDF( ae_q56s x_56 = tflite::ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier( scratch_output_tensor[i], scale_2_a, scale_2_b); - // Align from 48bit to 32bit on the QR register: - x_56 = AE_Q56S_SLAI(x_56, 16); // Add output adjustment: x_56 = AE_ADDQ56(x_56, output_zp_56); // Cap min/max and convert to int32 (already aligned to 32bit): diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/utils.h b/tensorflow/lite/micro/kernels/xtensa_hifimini/utils.h deleted file mode 100644 index 59caf4bbf2f..00000000000 --- a/tensorflow/lite/micro/kernels/xtensa_hifimini/utils.h +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_XTENSA_HIFIMINI_UTILS_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_XTENSA_HIFIMINI_UTILS_H_ - -#include - -#include - -// INT24 MIN/MAX -#define INT24_MIN -8388608 -#define INT24_MAX 8388607 - -// Converts an int32 value into a 2x24bit PR register file. If the int32 value -// is outside the numerical limits of a 24bit integer, the "fractional" or lower -// 8bits are discarded. If the value is within the range of a 24 bit integer, -// the "signed" or upper 8bits are discarded. 
-inline ae_p24x2s AE_CONVERT_INT32_24x2(int32_t v) { - if (v > INT24_MIN && v < INT24_MAX) { - return *reinterpret_cast(&v); - } else { - return static_cast(*reinterpret_cast(&v)); - } -} - -// Shifts a 48bit accumulator value into 32bit space and returns the value. -#define AE_CONVERT_Q56_INT32(v) AE_TRUNCA32Q48(AE_Q56S_SLAI(v, 16)) - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_XTENSA_HIFIMINI_UTILS_H_ diff --git a/tensorflow/lite/micro/testing/test_utils.cc b/tensorflow/lite/micro/testing/test_utils.cc index 9f7803fcf62..9174c6c143f 100644 --- a/tensorflow/lite/micro/testing/test_utils.cc +++ b/tensorflow/lite/micro/testing/test_utils.cc @@ -18,6 +18,33 @@ limitations under the License. namespace tflite { namespace testing { +uint8_t F2Q(float value, float min, float max) { + int32_t result = ZeroPointFromMinMax(min, max) + + (value / ScaleFromMinMax(min, max)) + 0.5f; + if (result < std::numeric_limits::min()) { + result = std::numeric_limits::min(); + } + if (result > std::numeric_limits::max()) { + result = std::numeric_limits::max(); + } + return result; +} + +// Converts a float value into a signed eight-bit quantized value. +int8_t F2QS(float value, float min, float max) { + return F2Q(value, min, max) + std::numeric_limits::min(); +} + +int32_t F2Q32(float value, float scale) { + double quantized = value / scale; + if (quantized > std::numeric_limits::max()) { + quantized = std::numeric_limits::max(); + } else if (quantized < std::numeric_limits::min()) { + quantized = std::numeric_limits::min(); + } + return static_cast(quantized); +} + // TODO(b/141330728): Move this method elsewhere as part clean up. 
void PopulateContext(TfLiteTensor* tensors, int tensors_size, ErrorReporter* error_reporter, TfLiteContext* context) { @@ -41,5 +68,139 @@ void PopulateContext(TfLiteTensor* tensors, int tensors_size, } } +TfLiteTensor CreateFloatTensor(std::initializer_list data, + TfLiteIntArray* dims, const char* name, + bool is_variable) { + return CreateFloatTensor(data.begin(), dims, name, is_variable); +} + +TfLiteTensor CreateBoolTensor(std::initializer_list data, + TfLiteIntArray* dims, const char* name, + bool is_variable) { + return CreateBoolTensor(data.begin(), dims, name, is_variable); +} + +TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims, + const char* name, float min, float max, + bool is_variable) { + TfLiteTensor result; + result.type = kTfLiteUInt8; + result.data.uint8 = const_cast(data); + result.dims = dims; + result.params = {ScaleFromMinMax(min, max), + ZeroPointFromMinMax(min, max)}; + result.allocation_type = kTfLiteMemNone; + result.bytes = ElementCount(*dims) * sizeof(uint8_t); + result.allocation = nullptr; + result.name = name; + result.is_variable = false; + return result; +} + +TfLiteTensor CreateQuantizedTensor(std::initializer_list data, + TfLiteIntArray* dims, const char* name, + float min, float max, bool is_variable) { + return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable); +} + +TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims, + const char* name, float min, float max, + bool is_variable) { + TfLiteTensor result; + result.type = kTfLiteInt8; + result.data.int8 = const_cast(data); + result.dims = dims; + result.params = {ScaleFromMinMax(min, max), + ZeroPointFromMinMax(min, max)}; + result.allocation_type = kTfLiteMemNone; + result.bytes = ElementCount(*dims) * sizeof(int8_t); + result.allocation = nullptr; + result.name = name; + result.is_variable = is_variable; + return result; +} + +TfLiteTensor CreateQuantizedTensor(std::initializer_list data, + TfLiteIntArray* 
dims, const char* name, + float min, float max, bool is_variable) { + return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable); +} + +TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data, + TfLiteIntArray* dims, const char* name, + bool is_variable) { + TfLiteTensor result; + SymmetricQuantize(data, dims, quantized_data, &result.params.scale); + result.data.uint8 = quantized_data; + result.type = kTfLiteUInt8; + result.dims = dims; + result.params.zero_point = 128; + result.allocation_type = kTfLiteMemNone; + result.bytes = ElementCount(*dims) * sizeof(uint8_t); + result.allocation = nullptr; + result.name = name; + result.is_variable = is_variable; + return result; +} + +TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data, + TfLiteIntArray* dims, const char* name, + bool is_variable) { + TfLiteTensor result; + SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale); + result.data.int8 = quantized_data; + result.type = kTfLiteInt8; + result.dims = dims; + result.params.zero_point = 0; + result.allocation_type = kTfLiteMemNone; + result.bytes = ElementCount(*dims) * sizeof(int8_t); + result.allocation = nullptr; + result.name = name; + result.is_variable = is_variable; + return result; +} + +TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data, + TfLiteIntArray* dims, const char* name, + bool is_variable) { + TfLiteTensor result; + SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale); + result.data.i16 = quantized_data; + result.type = kTfLiteInt16; + result.dims = dims; + result.params.zero_point = 0; + result.allocation_type = kTfLiteMemNone; + result.bytes = ElementCount(*dims) * sizeof(int16_t); + result.allocation = nullptr; + result.name = name; + result.is_variable = is_variable; + return result; +} + +TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims, + const char* name, float scale, + bool is_variable) { + 
TfLiteTensor result; + result.type = kTfLiteInt32; + result.data.i32 = const_cast(data); + result.dims = dims; + // Quantized int32 tensors always have a zero point of 0, since the range of + // int32 values is large, and because zero point costs extra cycles during + // processing. + result.params = {scale, 0}; + result.allocation_type = kTfLiteMemNone; + result.bytes = ElementCount(*dims) * sizeof(int32_t); + result.allocation = nullptr; + result.name = name; + result.is_variable = is_variable; + return result; +} + +TfLiteTensor CreateQuantized32Tensor(std::initializer_list data, + TfLiteIntArray* dims, const char* name, + float scale, bool is_variable) { + return CreateQuantized32Tensor(data.begin(), dims, name, scale, is_variable); +} + } // namespace testing } // namespace tflite diff --git a/tensorflow/lite/micro/testing/test_utils.h b/tensorflow/lite/micro/testing/test_utils.h index 7aa1e9d488f..b0ebe159b67 100644 --- a/tensorflow/lite/micro/testing/test_utils.h +++ b/tensorflow/lite/micro/testing/test_utils.h @@ -65,182 +65,65 @@ inline int ZeroPointFromMinMax(const float min, const float max) { } // Converts a float value into an unsigned eight-bit quantized value. -inline uint8_t F2Q(const float value, const float min, const float max) { - int32_t result = ZeroPointFromMinMax(min, max) + - (value / ScaleFromMinMax(min, max)) + 0.5f; - if (result < std::numeric_limits::min()) { - result = std::numeric_limits::min(); - } - if (result > std::numeric_limits::max()) { - result = std::numeric_limits::max(); - } - return result; -} +uint8_t F2Q(float value, float min, float max); // Converts a float value into a signed eight-bit quantized value. -inline int8_t F2QS(const float value, const float min, const float max) { - return F2Q(value, min, max) + std::numeric_limits::min(); -} +int8_t F2QS(const float value, const float min, const float max); // Converts a float value into a signed thirty-two-bit quantized value. 
Note // that values close to max int and min int may see significant error due to // a lack of floating point granularity for large values. -inline int32_t F2Q32(const float value, const float scale) { - double quantized = value / scale; - if (quantized > std::numeric_limits::max()) { - quantized = std::numeric_limits::max(); - } else if (quantized < std::numeric_limits::min()) { - quantized = std::numeric_limits::min(); - } - return static_cast(quantized); -} +int32_t F2Q32(const float value, const float scale); // TODO(b/141330728): Move this method elsewhere as part clean up. void PopulateContext(TfLiteTensor* tensors, int tensors_size, ErrorReporter* error_reporter, TfLiteContext* context); -inline TfLiteTensor CreateFloatTensor(std::initializer_list data, - TfLiteIntArray* dims, const char* name, - bool is_variable = false) { - return CreateFloatTensor(data.begin(), dims, name, is_variable); -} +TfLiteTensor CreateFloatTensor(std::initializer_list data, + TfLiteIntArray* dims, const char* name, + bool is_variable = false); -inline TfLiteTensor CreateBoolTensor(std::initializer_list data, +TfLiteTensor CreateBoolTensor(std::initializer_list data, + TfLiteIntArray* dims, const char* name, + bool is_variable = false); + +TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims, + const char* name, float min, float max, + bool is_variable = false); + +TfLiteTensor CreateQuantizedTensor(std::initializer_list data, + TfLiteIntArray* dims, const char* name, + float min, float max, + bool is_variable = false); + +TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims, + const char* name, float min, float max, + bool is_variable = false); + +TfLiteTensor CreateQuantizedTensor(std::initializer_list data, + TfLiteIntArray* dims, const char* name, + float min, float max, + bool is_variable = false); + +TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data, + TfLiteIntArray* dims, const char* name, + bool 
is_variable = false); + +TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data, + TfLiteIntArray* dims, const char* name, + bool is_variable = false); + +TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data, + TfLiteIntArray* dims, const char* name, + bool is_variable = false); + +TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims, + const char* name, float scale, + bool is_variable = false); + +TfLiteTensor CreateQuantized32Tensor(std::initializer_list data, TfLiteIntArray* dims, const char* name, - bool is_variable = false) { - return CreateBoolTensor(data.begin(), dims, name, is_variable); -} - -inline TfLiteTensor CreateQuantizedTensor(const uint8_t* data, - TfLiteIntArray* dims, - const char* name, float min, - float max, bool is_variable = false) { - TfLiteTensor result; - result.type = kTfLiteUInt8; - result.data.uint8 = const_cast(data); - result.dims = dims; - result.params = {ScaleFromMinMax(min, max), - ZeroPointFromMinMax(min, max)}; - result.allocation_type = kTfLiteMemNone; - result.bytes = ElementCount(*dims) * sizeof(uint8_t); - result.allocation = nullptr; - result.name = name; - result.is_variable = false; - return result; -} - -inline TfLiteTensor CreateQuantizedTensor(std::initializer_list data, - TfLiteIntArray* dims, - const char* name, float min, - float max, bool is_variable = false) { - return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable); -} - -inline TfLiteTensor CreateQuantizedTensor(const int8_t* data, - TfLiteIntArray* dims, - const char* name, float min, - float max, bool is_variable = false) { - TfLiteTensor result; - result.type = kTfLiteInt8; - result.data.int8 = const_cast(data); - result.dims = dims; - result.params = {ScaleFromMinMax(min, max), - ZeroPointFromMinMax(min, max)}; - result.allocation_type = kTfLiteMemNone; - result.bytes = ElementCount(*dims) * sizeof(int8_t); - result.allocation = nullptr; - result.name = name; - 
result.is_variable = is_variable; - return result; -} - -inline TfLiteTensor CreateQuantizedTensor(std::initializer_list data, - TfLiteIntArray* dims, - const char* name, float min, - float max, bool is_variable = false) { - return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable); -} - -inline TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data, - TfLiteIntArray* dims, - const char* name, - bool is_variable = false) { - TfLiteTensor result; - SymmetricQuantize(data, dims, quantized_data, &result.params.scale); - result.data.uint8 = quantized_data; - result.type = kTfLiteUInt8; - result.dims = dims; - result.params.zero_point = 128; - result.allocation_type = kTfLiteMemNone; - result.bytes = ElementCount(*dims) * sizeof(uint8_t); - result.allocation = nullptr; - result.name = name; - result.is_variable = is_variable; - return result; -} - -inline TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data, - TfLiteIntArray* dims, - const char* name, - bool is_variable = false) { - TfLiteTensor result; - SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale); - result.data.int8 = quantized_data; - result.type = kTfLiteInt8; - result.dims = dims; - result.params.zero_point = 0; - result.allocation_type = kTfLiteMemNone; - result.bytes = ElementCount(*dims) * sizeof(int8_t); - result.allocation = nullptr; - result.name = name; - result.is_variable = is_variable; - return result; -} - -inline TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data, - TfLiteIntArray* dims, - const char* name, - bool is_variable = false) { - TfLiteTensor result; - SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale); - result.data.i16 = quantized_data; - result.type = kTfLiteInt16; - result.dims = dims; - result.params.zero_point = 0; - result.allocation_type = kTfLiteMemNone; - result.bytes = ElementCount(*dims) * sizeof(int16_t); - result.allocation = nullptr; - result.name = 
name; - result.is_variable = is_variable; - return result; -} - -inline TfLiteTensor CreateQuantized32Tensor(const int32_t* data, - TfLiteIntArray* dims, - const char* name, float scale, - bool is_variable = false) { - TfLiteTensor result; - result.type = kTfLiteInt32; - result.data.i32 = const_cast(data); - result.dims = dims; - // Quantized int32 tensors always have a zero point of 0, since the range of - // int32 values is large, and because zero point costs extra cycles during - // processing. - result.params = {scale, 0}; - result.allocation_type = kTfLiteMemNone; - result.bytes = ElementCount(*dims) * sizeof(int32_t); - result.allocation = nullptr; - result.name = name; - result.is_variable = is_variable; - return result; -} - -inline TfLiteTensor CreateQuantized32Tensor(std::initializer_list data, - TfLiteIntArray* dims, - const char* name, float scale, - bool is_variable = false) { - return CreateQuantized32Tensor(data.begin(), dims, name, scale, is_variable); -} + float scale, bool is_variable = false); template diff --git a/tensorflow/lite/micro/tools/make/third_party_downloads.inc b/tensorflow/lite/micro/tools/make/third_party_downloads.inc index 6ec1aabaaf0..b331db2c80e 100644 --- a/tensorflow/lite/micro/tools/make/third_party_downloads.inc +++ b/tensorflow/lite/micro/tools/make/third_party_downloads.inc @@ -28,8 +28,8 @@ LEON_BCC2_MD5 := "cdf78082be4882da2a92c9baa82fe765" TSIM_URL := "https://www.gaisler.com/anonftp/tsim/tsim-eval-2.0.63.tar.gz" TSIM_MD5 := "afa0095d3ed989a949e1467f94e41d2f" -CMSIS_URL := "https://github.com/ARM-software/CMSIS_5/archive/3d8235079ade1e4df06f91be65e0309cc45e1952.zip" -CMSIS_MD5 := "f3e93203e875caf4ba6aff0bccd95d85" +CMSIS_URL := "https://github.com/ARM-software/CMSIS_5/archive/8a4db53f69da06e97565fe2f2e8926d193a5759d.zip" +CMSIS_MD5 := "e9864fb71b65adc4f7d92a9dea6e1aab" AM_SDK_URL := "http://s3.asia.ambiqmicro.com/downloads/AmbiqSuite-Rel2.2.0.zip" AM_SDK_MD5 := "7605fa2d4d97e6bb7a1190c92b66b597" @@ -56,8 +56,8 @@ 
SIFIVE_FE310_LIB_MD5 := "06ee24c4956f8e21670ab3395861fe64" KISSFFT_URL="https://github.com/mborgerding/kissfft/archive/v130.zip" KISSFFT_MD5="438ba1fef5783cc5f5f201395cc477ca" -RUY_URL="https://github.com/google/ruy/archive/9f53ba413e6fc879236dcaa3e008915973d67a4f.zip" -RUY_MD5="ce2c2444cced9dcf6ca6bc908061faa8" +RUY_URL="https://github.com/google/ruy/archive/4bdb31ab484e624deef9620ecde2156ca17f6567.zip" +RUY_MD5="191d6a173a4fde9742f597f0f4e1f08b" CIFAR10_DATASET_URL="https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz" CIFAR10_DATASET_MD5="c32a1d4ab5d03f1284b67883e8d87530" diff --git a/tensorflow/lite/string_util_test.cc b/tensorflow/lite/string_util_test.cc index ec2768d49ec..28d93840c56 100644 --- a/tensorflow/lite/string_util_test.cc +++ b/tensorflow/lite/string_util_test.cc @@ -33,13 +33,22 @@ TEST(StringUtil, TestStringUtil) { t1->type = kTfLiteString; t1->allocation_type = kTfLiteDynamic; - char data[] = {1, 0, 0, 0, 12, 0, 0, 0, 15, 0, 0, 0, 'X', 'Y', 'Z'}; + // String tensor with one string of length 3 + union { + char raw_bytes[15]; + struct { + int32_t num_strs; + int32_t offsets[2]; + char str_data[3]; + } tensor_data; + } data; + data.tensor_data = {1, {12, 15}, {'X', 'Y', 'Z'}}; TfLiteQuantization quant; quant.type = kTfLiteNoQuantization; quant.params = nullptr; - interpreter.SetTensorParametersReadOnly(2, kTfLiteString, "", {1}, quant, - data, 15); + interpreter.SetTensorParametersReadOnly( + 2, kTfLiteString, "", {1}, quant, data.raw_bytes, sizeof(data.raw_bytes)); TfLiteTensor* t2 = interpreter.tensor(2); interpreter.AllocateTensors(); diff --git a/tensorflow/lite/tools/make/download_dependencies.sh b/tensorflow/lite/tools/make/download_dependencies.sh index bea8a326a37..34d7ed3562c 100755 --- a/tensorflow/lite/tools/make/download_dependencies.sh +++ b/tensorflow/lite/tools/make/download_dependencies.sh @@ -37,8 +37,8 @@ EIGEN_URL="$(grep -o 'https.*gitlab.com/libeigen/eigen/-/archive/.*tar\.gz' "${B EIGEN_SHA="$(eval echo $(grep '# 
SHARED_EIGEN_SHA' "${BZL_FILE_PATH}" | grep -o '\".*\"'))" GEMMLOWP_URL="$(grep -o 'https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/.*zip' "${BZL_FILE_PATH}" | head -n1)" GEMMLOWP_SHA="$(eval echo $(grep '# SHARED_GEMMLOWP_SHA' "${BZL_FILE_PATH}" | grep -o '\".*\"'))" -RUY_URL="https://github.com/google/ruy/archive/9f53ba413e6fc879236dcaa3e008915973d67a4f.zip" -RUY_SHA="fe8345f521bb378745ebdd0f8c5937414849936851d2ec2609774eb2d7098e54" +RUY_URL="https://github.com/google/ruy/archive/4bdb31ab484e624deef9620ecde2156ca17f6567.zip" +RUY_SHA="51c1492196cdd6fc524dd8b539de5d644bbb436699fab3908585a575e347c789" GOOGLETEST_URL="https://github.com/google/googletest/archive/release-1.8.0.tar.gz" GOOGLETEST_SHA="58a6f4277ca2bc8565222b3bbd58a177609e9c488e8a72649359ba51450db7d8" ABSL_URL="$(grep -o 'https://github.com/abseil/abseil-cpp/.*tar.gz' "${BZL_FILE_PATH}" | head -n1)" diff --git a/tensorflow/lite/tools/optimize/sparsity/format_converter.cc b/tensorflow/lite/tools/optimize/sparsity/format_converter.cc index 05cb8b32bf7..3800672a4e2 100644 --- a/tensorflow/lite/tools/optimize/sparsity/format_converter.cc +++ b/tensorflow/lite/tools/optimize/sparsity/format_converter.cc @@ -250,7 +250,7 @@ FormatConverter::FormatConverter(const std::vector& shape, for (int i = 0; i < original_rank; i++) { if (block_dim < block_map_.size() && block_map_[block_dim] == i) { int orig_dim = traversal_order_[original_rank + block_dim]; - block_size_[i] = sparsity.dim_metadata[orig_dim].dense_size; + block_size_[block_dim] = sparsity.dim_metadata[orig_dim].dense_size; blocked_shape_[i] = shape[i] / sparsity.dim_metadata[orig_dim].dense_size; block_dim++; } else { @@ -273,9 +273,10 @@ void FormatConverter::Populate(const T* src_data, std::vector indices, } for (; i < indices.size(); i++) { - int orig_dim = block_map_[traversal_order_[i] - orig_rank]; + const int block_idx = traversal_order_[i] - orig_rank; + const int orig_dim = block_map_[block_idx]; 
orig_idx[orig_dim] = - orig_idx[orig_dim] * block_size_[orig_dim] + indices[i]; + orig_idx[orig_dim] * block_size_[block_idx] + indices[i]; } data_[GetFlattenedIndex(orig_idx, dense_shape_)] = src_data[*src_data_ptr]; diff --git a/tensorflow/lite/tools/optimize/sparsity/format_converter_test.cc b/tensorflow/lite/tools/optimize/sparsity/format_converter_test.cc index c3351810283..96919d22d4a 100644 --- a/tensorflow/lite/tools/optimize/sparsity/format_converter_test.cc +++ b/tensorflow/lite/tools/optimize/sparsity/format_converter_test.cc @@ -31,18 +31,18 @@ TEST(FormatConverterTest, SimpleTestD0D1) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0 = {3}; const std::vector dm1 = {4}; EXPECT_EQ(dm0, dim_metadata[0]); EXPECT_EQ(dm1, dim_metadata[2]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -55,7 +55,7 @@ TEST(FormatConverterTest, SimpleTestS0D1) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0_0 = {0, 2}; const std::vector dm0_1 = {0, 2}; const std::vector dm1 = {4}; @@ -63,12 +63,12 @@ TEST(FormatConverterTest, SimpleTestS0D1) { EXPECT_EQ(dm0_1, dim_metadata[1]); EXPECT_EQ(dm1, dim_metadata[2]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {6, 0, 9, 8, 5, 0, 0, 7}; 
EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -81,7 +81,7 @@ TEST(FormatConverterTest, SimpleTestD0S1) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0 = {3}; const std::vector dm1_0 = {0, 3, 3, 5}; const std::vector dm1_1 = {0, 2, 3, 0, 3}; @@ -89,12 +89,12 @@ TEST(FormatConverterTest, SimpleTestD0S1) { EXPECT_EQ(dm1_0, dim_metadata[2]); EXPECT_EQ(dm1_1, dim_metadata[3]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {6, 9, 8, 5, 7}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -107,7 +107,7 @@ TEST(FormatConverterTest, SimpleTestS0S1) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0_0 = {0, 2}; const std::vector dm0_1 = {0, 2}; const std::vector dm1_0 = {0, 3, 5}; @@ -117,12 +117,12 @@ TEST(FormatConverterTest, SimpleTestS0S1) { EXPECT_EQ(dm1_0, dim_metadata[2]); EXPECT_EQ(dm1_1, dim_metadata[3]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {6, 9, 8, 5, 7}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -135,18 +135,18 @@ 
TEST(FormatConverterTest, SimpleTestD1D0) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0 = {4}; const std::vector dm1 = {3}; EXPECT_EQ(dm0, dim_metadata[0]); EXPECT_EQ(dm1, dim_metadata[2]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {6, 0, 5, 0, 0, 0, 9, 0, 0, 8, 0, 7}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -159,7 +159,7 @@ TEST(FormatConverterTest, SimpleTestS1D0) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0_0 = {0, 3}; const std::vector dm0_1 = {0, 2, 3}; const std::vector dm1 = {3}; @@ -167,12 +167,12 @@ TEST(FormatConverterTest, SimpleTestS1D0) { EXPECT_EQ(dm0_1, dim_metadata[1]); EXPECT_EQ(dm1, dim_metadata[2]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {6, 0, 5, 9, 0, 0, 8, 0, 7}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -185,7 +185,7 @@ TEST(FormatConverterTest, SimpleTestD1S0) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0 = {4}; const std::vector dm1_0 = {0, 2, 2, 3, 5}; 
const std::vector dm1_1 = {0, 2, 0, 0, 2}; @@ -193,12 +193,12 @@ TEST(FormatConverterTest, SimpleTestD1S0) { EXPECT_EQ(dm1_0, dim_metadata[2]); EXPECT_EQ(dm1_1, dim_metadata[3]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {6, 5, 9, 8, 7}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -211,7 +211,7 @@ TEST(FormatConverterTest, SimpleTestS1S0) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0_0 = {0, 3}; const std::vector dm0_1 = {0, 2, 3}; const std::vector dm1_0 = {0, 2, 3, 5}; @@ -221,12 +221,12 @@ TEST(FormatConverterTest, SimpleTestS1S0) { EXPECT_EQ(dm1_0, dim_metadata[2]); EXPECT_EQ(dm1_1, dim_metadata[3]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {6, 5, 9, 8, 7}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -239,7 +239,7 @@ TEST(FormatConverterTest, 3DTestS0D1S2) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0_0 = {0, 2}; const std::vector dm0_1 = {0, 2}; const std::vector dm1 = {2}; @@ -252,12 +252,12 @@ TEST(FormatConverterTest, 3DTestS0D1S2) { EXPECT_EQ(dm2_0, dim_metadata[4]); EXPECT_EQ(dm2_1, dim_metadata[5]); - const auto& data = converter.GetData(); + const auto data = 
converter.GetData(); const std::vector expected_data = {6, 9, 8, 5, 7}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -270,7 +270,7 @@ TEST(FormatConverterTest, 3DTestD0D1S2) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0 = {3}; const std::vector dm1 = {2}; const std::vector dm2_0 = {0, 1, 3, 3, 3, 4, 5}; @@ -281,12 +281,12 @@ TEST(FormatConverterTest, 3DTestD0D1S2) { EXPECT_EQ(dm2_0, dim_metadata[4]); EXPECT_EQ(dm2_1, dim_metadata[5]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {6, 9, 8, 5, 7}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -300,7 +300,7 @@ TEST(FormatConverterTest, 3DTestS0S1S2) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0_0 = {0, 2}; const std::vector dm0_1 = {0, 2}; const std::vector dm1_0 = {0, 2, 5}; @@ -314,12 +314,12 @@ TEST(FormatConverterTest, 3DTestS0S1S2) { EXPECT_EQ(dm2_0, dim_metadata[4]); EXPECT_EQ(dm2_1, dim_metadata[5]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {1, 7, 5, 2, 4, 8, 3, 9}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); 
EXPECT_EQ(data_back, dense_values); } @@ -333,7 +333,7 @@ TEST(FormatConverterTest, 3DTestS0S2S1) { FormatConverter converter(dense_shape, traversal_order, format); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm0_0 = {0, 2}; const std::vector dm0_1 = {0, 2}; const std::vector dm1_0 = {0, 2, 5}; @@ -347,12 +347,12 @@ TEST(FormatConverterTest, 3DTestS0S2S1) { EXPECT_EQ(dm2_0, dim_metadata[4]); EXPECT_EQ(dm2_1, dim_metadata[5]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {1, 7, 5, 2, 4, 8, 3, 9}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -369,25 +369,58 @@ TEST(FormatConverterTest, BlockTestD0D1) { block_size, block_map); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm = {2}; EXPECT_EQ(dm, dim_metadata[0]); EXPECT_EQ(dm, dim_metadata[2]); EXPECT_EQ(dm, dim_metadata[4]); EXPECT_EQ(dm, dim_metadata[6]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {1, 0, 0, 4, 2, 3, 0, 0, 0, 0, 0, 0, 5, 0, 0, 6}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } // BCSR -TEST(FormatConverterTest, BlockTestD0S1) { +TEST(FormatConverterTest, BlockTestD0S11DBlock) { + const std::vector dense_values = {1, 0, 2, 3, 0, 4, 0, 0, + 0, 0, 5, 0, 0, 0, 0, 6}; + const std::vector dense_shape = {4, 4}; + const std::vector traversal_order = {0, 1, 2}; + const std::vector 
format = {kTfLiteDimDense, + kTfLiteDimSparseCSR}; + const std::vector block_size = {2}; + const std::vector block_map = {1}; + FormatConverter converter(dense_shape, traversal_order, format, + block_size, block_map); + converter.DenseToSparse(dense_values.data()); + + const auto dim_metadata = converter.GetDimMetadata(); + const std::vector dm0 = {4}; + const std::vector dm2 = {2}; + const std::vector dm1_0 = {0, 2, 3, 4, 5}; + const std::vector dm1_1 = {0, 1, 0, 1, 1}; + EXPECT_EQ(dm0, dim_metadata[0]); + EXPECT_EQ(dm1_0, dim_metadata[2]); + EXPECT_EQ(dm1_1, dim_metadata[3]); + EXPECT_EQ(dm2, dim_metadata[4]); + + const auto data = converter.GetData(); + const std::vector expected_data = {1, 0, 2, 3, 0, 4, 5, 0, 0, 6}; + EXPECT_EQ(expected_data, data); + + converter.SparseToDense(expected_data.data()); + const auto data_back = converter.GetData(); + EXPECT_EQ(data_back, dense_values); +} + +// BCSR +TEST(FormatConverterTest, BlockTestD0S12DBlock) { const std::vector dense_values = {1, 0, 2, 3, 0, 4, 0, 0, 0, 0, 5, 0, 0, 0, 0, 6}; const std::vector dense_shape = {4, 4}; @@ -400,7 +433,7 @@ TEST(FormatConverterTest, BlockTestD0S1) { block_size, block_map); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm = {2}; const std::vector dm1_0 = {0, 2, 3}; const std::vector dm1_1 = {0, 1, 1}; @@ -410,12 +443,12 @@ TEST(FormatConverterTest, BlockTestD0S1) { EXPECT_EQ(dm, dim_metadata[4]); EXPECT_EQ(dm, dim_metadata[6]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {1, 0, 0, 4, 2, 3, 0, 0, 5, 0, 0, 6}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -433,7 +466,7 @@ TEST(FormatConverterTest, BlockTestD1S0) { 
block_size, block_map); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm = {2}; const std::vector dm1_0 = {0, 1, 3}; const std::vector dm1_1 = {0, 0, 1}; @@ -443,12 +476,12 @@ TEST(FormatConverterTest, BlockTestD1S0) { EXPECT_EQ(dm, dim_metadata[4]); EXPECT_EQ(dm, dim_metadata[6]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {1, 0, 0, 4, 2, 0, 3, 0, 5, 0, 0, 6}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -466,7 +499,7 @@ TEST(FormatConverterTest, BlockTestD0S1LastBlockEmpty) { block_size, block_map); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm = {2}; const std::vector dm1_0 = {0, 2, 2}; const std::vector dm1_1 = {0, 1}; @@ -476,12 +509,12 @@ TEST(FormatConverterTest, BlockTestD0S1LastBlockEmpty) { EXPECT_EQ(dm, dim_metadata[4]); EXPECT_EQ(dm, dim_metadata[6]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {1, 0, 0, 4, 2, 3, 0, 0}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } @@ -499,7 +532,7 @@ TEST(FormatConverterTest, BlockTestD0S1ColMajorBlock) { block_size, block_map); converter.DenseToSparse(dense_values.data()); - const auto& dim_metadata = converter.GetDimMetadata(); + const auto dim_metadata = converter.GetDimMetadata(); const std::vector dm = {2}; const std::vector dm1_0 = {0, 3, 4}; const std::vector dm1_1 = {0, 1, 2, 1}; 
@@ -509,13 +542,13 @@ TEST(FormatConverterTest, BlockTestD0S1ColMajorBlock) { EXPECT_EQ(dm, dim_metadata[4]); EXPECT_EQ(dm, dim_metadata[6]); - const auto& data = converter.GetData(); + const auto data = converter.GetData(); const std::vector expected_data = {1, 1, 0, 0, 2, 2, 3, 3, 0, 0, 4, 4, 5, 0, 0, 0}; EXPECT_EQ(expected_data, data); converter.SparseToDense(expected_data.data()); - const auto& data_back = converter.GetData(); + const auto data_back = converter.GetData(); EXPECT_EQ(data_back, dense_values); } } // namespace diff --git a/tensorflow/python/autograph/converters/control_flow.py b/tensorflow/python/autograph/converters/control_flow.py index 10db16ef1bb..dc26757c46d 100644 --- a/tensorflow/python/autograph/converters/control_flow.py +++ b/tensorflow/python/autograph/converters/control_flow.py @@ -461,6 +461,9 @@ class ControlFlowTransformer(converter.Base): loop_vars, nonlocal_declarations, state_getter_name, state_setter_name) opts = self._create_loop_options(node) + opts.keys.append(gast.Constant('iterate_names', kind=None)) + opts.values.append(gast.Constant( + parser.unparse(node.target, include_encoding_marker=False), kind=None)) if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST): extra_test = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST) diff --git a/tensorflow/python/autograph/g3doc/reference/_control_flow_tutorial.ipynb b/tensorflow/python/autograph/g3doc/reference/_control_flow_tutorial.ipynb new file mode 100644 index 00000000000..0d544ee7725 --- /dev/null +++ b/tensorflow/python/autograph/g3doc/reference/_control_flow_tutorial.ipynb @@ -0,0 +1,862 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "-vLwpT31YOJk" + }, + "source": [ + "TODO(b/138297412): This colab retains some useful code snippets and demonstrations that used to be in the tf.function/AutoGraph customization tutorial, and should be rolled into the existing docs as part of a broader markdown-\u003ecolab conversion." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "otIdN1TS8N7S" + }, + "outputs": [], + "source": [ + "import tensorflow as tf" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "I0xDjO4SHLUD" + }, + "source": [ + "Define a helper function to demonstrate the kinds of errors you might encounter:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "D25apou9IOXa" + }, + "outputs": [], + "source": [ + "import traceback\n", + "import contextlib\n", + "\n", + "# Some helper code to demonstrate the kinds of errors you might encounter.\n", + "@contextlib.contextmanager\n", + "def assert_raises(error_class):\n", + " try:\n", + " yield\n", + " except error_class as e:\n", + " print('Caught expected exception \\n {}:'.format(error_class))\n", + " traceback.print_exc(limit=2)\n", + " except Exception as e:\n", + " raise e\n", + " else:\n", + " raise Exception('Expected {} to be raised but no error was raised!'.format(\n", + " error_class))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "5f05Vr_YBUCz" + }, + "source": [ + "## Using AutoGraph\n", + "\n", + "The [autograph](https://www.tensorflow.org/guide/function) library is fully integrated with `tf.function`, and it will rewrite conditionals and loops which depend on Tensors to run dynamically in the graph.\n", + "\n", + "`tf.cond` and `tf.while_loop` continue to work with `tf.function`, but code with control flow is often easier to write and understand when written in imperative style." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "xgKmkrNTZSyz" + }, + "source": [ + "## AutoGraph: Conditionals\n", + "\n", + "AutoGraph will convert `if` statements into the equivalent `tf.cond` calls.\n", + "\n", + "This substitution is made if the condition is a Tensor. 
Otherwise, the conditional is executed during tracing." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "20WlM9T2I9EV" + }, + "source": [ + "Here is a function that checks if the resulting graph uses `tf.cond`:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "E-7KllizZYsy" + }, + "outputs": [], + "source": [ + "def test_tf_cond(f, *args):\n", + " g = f.get_concrete_function(*args).graph\n", + " if any(node.name == 'cond' for node in g.as_graph_def().node):\n", + " print(\"{}({}) uses tf.cond.\".format(\n", + " f.__name__, ', '.join(map(str, args))))\n", + " else:\n", + " print(\"{}({}) executes normally.\".format(\n", + " f.__name__, ', '.join(map(str, args))))\n", + "\n", + " print(\" result: \",f(*args).numpy())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "DlqiutEEJHOe" + }, + "source": [ + "This substitution is made if the condition is a Tensor. 
Otherwise, the conditional is executed during tracing.\n", + "\n", + "Passing a python `True` executes the conditional normally:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "fCMywOXwJLIQ" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def dropout(x, training=True):\n", + " if training:\n", + " x = tf.nn.dropout(x, rate=0.5)\n", + " return x" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "68D2RZ17JM8u" + }, + "outputs": [], + "source": [ + "test_tf_cond(dropout, tf.ones([10], dtype=tf.float32), True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "WEz0QYucJPBa" + }, + "source": [ + "But passing a tensor replaces the python `if` with a `tf.cond`:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "o86paGR-Zadi" + }, + "outputs": [], + "source": [ + "test_tf_cond(dropout, tf.ones([10], dtype=tf.float32), tf.constant(True))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "5xFLfdApZh8q" + }, + "source": [ + "`tf.cond` has a number of subtleties.\n", + "\n", + "it works by tracing both sides of the conditional, and then choosing the appropriate branch at runtime, depending on the condition. Tracing both sides can result in unexpected execution of Python code." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "VTMoZEVaZiwk" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def f(x):\n", + " if x \u003e 0:\n", + " x = x + 1.\n", + " print(\"Tracing `then` branch\")\n", + " else:\n", + " x = x - 1.\n", + " print(\"Tracing `else` branch\")\n", + " return x" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "HqBVIZWb0Qzn" + }, + "outputs": [], + "source": [ + "f(-1.0).numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "BIMfbXlW0QdP" + }, + "outputs": [], + "source": [ + "f(1.0).numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "2nBnJ42v0Pvq" + }, + "outputs": [], + "source": [ + "f(tf.constant(1.0)).numpy()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "zyzzvtN5Jfpb" + }, + "source": [ + "It requires that if one branch creates a tensor used downstream, the other branch must also create that tensor." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "k_dxWHeFZlaQ" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def f():\n", + " if tf.constant(True):\n", + " x = tf.ones([3, 3])\n", + " return x\n", + "\n", + "# Throws an error because both branches need to define `x`.\n", + "with assert_raises(ValueError):\n", + " f()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "wP-LZP6cztnu" + }, + "source": [ + "If you want to be sure that a particular section of control flow is never converted by autograph, then explicitly convert the object to a python type so an error is raised instead: " + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "iG_VDavjzrzV" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def f(x, y):\n", + " if bool(x):\n", + " y = y + 1.\n", + " print(\"Tracing `then` branch\")\n", + " else:\n", + " y = y - 1.\n", + " print(\"Tracing `else` branch\")\n", + " return y" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "kQ4CRP9T0rH2" + }, + "outputs": [], + "source": [ + "f(True, 0).numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ww9tCzHy0rkv" + }, + "outputs": [], + "source": [ + "f(False, 0).numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "ppuV7iug0r7i" + }, + "outputs": [], + "source": [ + "with assert_raises(TypeError):\n", + " f(tf.constant(True), 0.0)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "yho4J0a0ZkQS" + }, + "source": [ + "## AutoGraph and loops\n", + "\n", + "AutoGraph has a few simple rules for converting loops.\n", + "\n", + "- `for`: Convert if the 
iterable is a tensor\n", + "- `while`: Convert if the while condition depends on a tensor\n", + "\n", + "If a loop is converted, it will be dynamically unrolled with `tf.while_loop`, or in the special case of a `for x in tf.data.Dataset`, transformed into `tf.data.Dataset.reduce`.\n", + "\n", + "If a loop is _not_ converted, it will be statically unrolled " + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "OyzGNQAuZsky" + }, + "outputs": [], + "source": [ + "def test_dynamically_unrolled(f, *args):\n", + " g = f.get_concrete_function(*args).graph\n", + " if any(node.name == 'while' for node in g.as_graph_def().node):\n", + " print(\"{}({}) uses tf.while_loop.\".format(\n", + " f.__name__, ', '.join(map(str, args))))\n", + " elif any(node.name == 'ReduceDataset' for node in g.as_graph_def().node):\n", + " print(\"{}({}) uses tf.data.Dataset.reduce.\".format(\n", + " f.__name__, ', '.join(map(str, args))))\n", + " else:\n", + " print(\"{}({}) gets unrolled.\".format(\n", + " f.__name__, ', '.join(map(str, args))))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "KFO1BSN9JkRP" + }, + "source": [ + "### For loops\n", + "\n", + "Here is a `tf.function` that demonstrates static unrolling:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "frecgTco_00V" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def for_in_range():\n", + " x = 0\n", + " for i in range(5):\n", + " x += i\n", + " return x\n", + "\n", + "test_dynamically_unrolled(for_in_range)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "PMdl0azc_5d4" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def for_in_tfrange():\n", + " x = tf.constant(0, dtype=tf.int32)\n", + " for i in tf.range(5):\n", + " x += i\n", + " return x\n", + 
"\n", + "test_dynamically_unrolled(for_in_tfrange)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Q7tmncQTZt6_" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def for_in_tfdataset():\n", + " x = tf.constant(0, dtype=tf.int64)\n", + " for i in tf.data.Dataset.range(5):\n", + " x += i\n", + " return x\n", + "\n", + "test_dynamically_unrolled(for_in_tfdataset)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "eyPzDYiJAC8f" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def while_py_cond():\n", + " x = 5\n", + " while x \u003e 0:\n", + " x -= 1\n", + " return x\n", + "\n", + "test_dynamically_unrolled(while_py_cond)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "l6s7aU-padY5" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def while_tf_cond():\n", + " x = tf.constant(5)\n", + " while x \u003e 0:\n", + " x -= 1\n", + " return x\n", + "\n", + "test_dynamically_unrolled(while_tf_cond)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "dSr64Xn6ap-S" + }, + "source": [ + " If you have a `break` or early `return` clause that depends on a tensor, the top-level condition or iterable should also be a tensor.\n", + "\n", + "Compare the following examples:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "hG2Fe_OEAwpY" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def while_py_true_py_break(x):\n", + " while True: # py true\n", + " if x == 0: # py break\n", + " break\n", + " x -= 1\n", + " return x\n", + "\n", + "test_dynamically_unrolled(while_py_true_py_break, 5)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": 
"Sr2cn5bY_E_9" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def buggy_while_py_true_tf_break(x):\n", + " while True: # py true\n", + " if tf.equal(x, 0): # tf break\n", + " break\n", + " x -= 1\n", + " return x\n", + "\n", + "with assert_raises(TypeError):\n", + " test_dynamically_unrolled(buggy_while_py_true_tf_break, 5)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Q-VirD-5avdZ" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def while_tf_true_tf_break(x):\n", + " while tf.constant(True): # tf true\n", + " if x == 0: # py break\n", + " break\n", + " x -= 1\n", + " return x\n", + "\n", + "test_dynamically_unrolled(while_tf_true_tf_break, 5)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Upx5J0j8_Ldu" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def buggy_py_for_tf_break():\n", + " x = 0\n", + " for i in range(5): # py for\n", + " if tf.equal(i, 3): # tf break\n", + " break\n", + " x += i\n", + " return x\n", + "\n", + "with assert_raises(TypeError):\n", + " test_dynamically_unrolled(buggy_py_for_tf_break)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "GQHbodav_QMt" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def tf_for_py_break():\n", + " x = 0\n", + " for i in tf.range(5): # tf for\n", + " if i == 3: # py break\n", + " break\n", + " x += i\n", + " return x\n", + "\n", + "test_dynamically_unrolled(tf_for_py_break)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "hyksHW9TCukR" + }, + "source": [ + "In order to accumulate results from a dynamically unrolled loop, you'll want to use `tf.TensorArray`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "HJ3Vb3dXfefN" + }, 
+ "outputs": [], + "source": [ + "batch_size = 2\n", + "seq_len = 3\n", + "feature_size = 4\n", + "\n", + "def rnn_step(inp, state):\n", + " return inp + state\n", + "\n", + "@tf.function\n", + "def dynamic_rnn(rnn_step, input_data, initial_state):\n", + " # [batch, time, features] -\u003e [time, batch, features]\n", + " input_data = tf.transpose(input_data, [1, 0, 2])\n", + " max_seq_len = input_data.shape[0]\n", + "\n", + " states = tf.TensorArray(tf.float32, size=max_seq_len)\n", + " state = initial_state\n", + " for i in tf.range(max_seq_len):\n", + " state = rnn_step(input_data[i], state)\n", + " states = states.write(i, state)\n", + " return tf.transpose(states.stack(), [1, 0, 2])\n", + " \n", + "dynamic_rnn(rnn_step,\n", + " tf.random.uniform([batch_size, seq_len, feature_size]),\n", + " tf.zeros([batch_size, feature_size]))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "9gmLpHY-bkly" + }, + "source": [ + "### Gotcha's\n", + "\n", + "As with `tf.cond`, `tf.while_loop` also comes with a number of subtleties.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "FJdfznhhKO7D" + }, + "source": [ + "#### Zero iterations\n", + "\n", + "Since a loop can execute 0 times, all tensors used downstream of the while_loop must be initialized above the loop.\n", + "\n", + "Here is an example of incorrect code:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "CocT5RHwblrQ" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def buggy_loop_var_uninitialized():\n", + " for i in tf.range(3):\n", + " x = i\n", + " return x\n", + "\n", + "with assert_raises(ValueError):\n", + " buggy_loop_var_uninitialized()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ncr7tRZ1KWh9" + }, + "source": [ + "And the correct version:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, 
+ "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Wm7wIKXcCDGf" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def f():\n", + " x = tf.constant(0)\n", + " for i in tf.range(3):\n", + " x = i\n", + " return x\n", + "\n", + "f()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "CM7qXVY0KZHB" + }, + "source": [ + "#### Consistent shapes and types\n", + "\n", + "The shape/dtypes of all loop variables must stay consistent with each iteration.\n", + "\n", + "Here is an incorrect example that attempts to change a tensor's type:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "FSftc9cCbpAo" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def buggy_loop_type_changes():\n", + " x = tf.constant(0, dtype=tf.float32)\n", + " for i in tf.range(3): # Yields tensors of type tf.int32...\n", + " x = i\n", + " return x\n", + "\n", + "with assert_raises(TypeError):\n", + " buggy_loop_type_changes()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "M5l90NAHKsUM" + }, + "source": [ + "Here is an incorrect example that attempts to change a Tensor's shape while iterating:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "kWF189prbuK0" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def buggy_concat():\n", + " x = tf.ones([0, 10])\n", + " for i in tf.range(5):\n", + " x = tf.concat([x, tf.ones([1, 10])], axis=0)\n", + " return x\n", + "\n", + "with assert_raises(ValueError):\n", + " buggy_concat()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "miYnYcznCHeV" + }, + "outputs": [], + "source": [ + "@tf.function\n", + "def concat_with_padding():\n", + " x = tf.zeros([5, 10])\n", + " for i in tf.range(5):\n", + " x = tf.concat([x[:i], 
tf.ones([1, 10]), tf.zeros([4-i, 10])], axis=0)\n", + " x.set_shape([5, 10])\n", + " return x\n", + "\n", + "concat_with_padding()\n" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "performance.ipynb", + "private_outputs": true, + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tensorflow/python/autograph/g3doc/reference/control_flow.md b/tensorflow/python/autograph/g3doc/reference/control_flow.md index 1e3b2db559c..79cc0f31450 100644 --- a/tensorflow/python/autograph/g3doc/reference/control_flow.md +++ b/tensorflow/python/autograph/g3doc/reference/control_flow.md @@ -420,6 +420,21 @@ def extra_test(break_): break_, = ag__.for_stmt(range(10), extra_test, ..., (break_,)) ``` +Mixing Tensor-dependent `break` and Python-dependent loops is disallowed: + +``` +@tf.function +def buggy_while_py_true_tf_break(x): + while True: # python conditional + if tf.equal(x, 0): # tensor break + break + x -= 1 + return x + +# Raises OperatorNotAllowedInGraphError: using a `tf.Tensor` as a Python `bool` is not allowed +# buggy_while_py_true_tf_break(5) +``` + ### `continue` statements Code blocks in which `continue` statements are used are rewritten with diff --git a/tensorflow/python/autograph/g3doc/reference/debugging.md b/tensorflow/python/autograph/g3doc/reference/debugging.md index abc30842297..2c2a96cec86 100644 --- a/tensorflow/python/autograph/g3doc/reference/debugging.md +++ b/tensorflow/python/autograph/g3doc/reference/debugging.md @@ -62,7 +62,7 @@ Adding a call to `tf.config.experimental_execute_functions_eagerly` before executing the function will land the debugger in the original code instead: ``` -tf.config.experimental_run_functions_eagerly(True) +tf.config.run_functions_eagerly(True) f(1) ``` diff --git a/tensorflow/python/autograph/operators/control_flow.py
b/tensorflow/python/autograph/operators/control_flow.py index 40493f07a2d..87eb83d709c 100644 --- a/tensorflow/python/autograph/operators/control_flow.py +++ b/tensorflow/python/autograph/operators/control_flow.py @@ -501,8 +501,18 @@ def _tf_range_for_stmt( iterate = compat_util.BasicRef(start) + def _value_or(name, var, default): + if (name == opts['iterate_names'] + and isinstance(var, special_values.Undefined)): + return default + return var + def aug_get_state(): - return (iterate.value,) + get_state() + state_vars = get_state() + state_vars = tuple( + _value_or(name, var, iterate.value) + for name, var in zip(symbol_names, state_vars)) + return (iterate.value,) + state_vars def aug_set_state(aug_loop_vars): # TOOD(mdan): Use starred assignment once we can switch to Py3-only syntax. @@ -876,16 +886,19 @@ def _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts): init_vars, loop_vars, new_loop_vars, symbol_names, opts) return new_loop_vars - # Non-v2 while_loop unpacks the results when there is only one return value. - # This enforces consistency across versions. - opts['return_same_structure'] = True - if 'shape_invariants' in opts: opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list( opts['shape_invariants'], init_vars) + while_loop_opts = dict(opts) + while_loop_opts.pop('iterate_names', None) + + # Non-v2 while_loop unpacks the results when there is only one return value. + # This enforces consistency across versions. 
+ while_loop_opts['return_same_structure'] = True + final_loop_vars = control_flow_ops.while_loop( - aug_test, aug_body, init_vars, **opts) + aug_test, aug_body, init_vars, **while_loop_opts) set_state(final_loop_vars) diff --git a/tensorflow/python/autograph/operators/control_flow_test.py b/tensorflow/python/autograph/operators/control_flow_test.py index 222f6d7ed97..5f0a9d09bf3 100644 --- a/tensorflow/python/autograph/operators/control_flow_test.py +++ b/tensorflow/python/autograph/operators/control_flow_test.py @@ -89,7 +89,7 @@ class ForLoopTest(test.TestCase): get_state=lambda: (s,), set_state=set_state, symbol_names=('s',), - opts={}) + opts={'iterate_names': 'i'}) self.assertEqual(self.evaluate(s), (1234,)) def test_range_tensor_explicit_limit_delta(self): @@ -109,7 +109,7 @@ class ForLoopTest(test.TestCase): get_state=lambda: (s,), set_state=set_state, symbol_names=('s',), - opts={}) + opts={'iterate_names': 'i'}) self.assertEqual(self.evaluate(s), (-171207,)) def test_range_tensor_explicit_limit_negative_delta(self): @@ -129,7 +129,7 @@ class ForLoopTest(test.TestCase): get_state=lambda: (s,), set_state=set_state, symbol_names=('s',), - opts={}) + opts={'iterate_names': 'i'}) self.assertEqual(self.evaluate(s), (171207,)) def test_range_tensor_random_delta(self): @@ -150,7 +150,7 @@ class ForLoopTest(test.TestCase): get_state=lambda: (s,), set_state=set_state, symbol_names=('s',), - opts={}) + opts={'iterate_names': 'i'}) self.assertEqual(self.evaluate(s), (1234,)) def test_range_tensor_random_negative_delta(self): @@ -171,7 +171,7 @@ class ForLoopTest(test.TestCase): get_state=lambda: (s,), set_state=set_state, symbol_names=('s',), - opts={}) + opts={'iterate_names': 'i'}) self.assertEqual(self.evaluate(s), (171207,)) def test_tensor_with_extra_test_object_vars(self): diff --git a/tensorflow/python/compat/compat.py b/tensorflow/python/compat/compat.py index db4ba0158a7..8228f5349e9 100644 --- a/tensorflow/python/compat/compat.py +++ 
b/tensorflow/python/compat/compat.py @@ -31,7 +31,7 @@ from tensorflow.python.util.tf_export import tf_export # This value changes every day with an automatic CL. It can be modified in code # via `forward_compatibility_horizon()` or with the environment variable # TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date. -_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 4, 29) +_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 4, 30) _FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS" _FORWARD_COMPATIBILITY_DATE_NUMBER = None diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py index ad1a98134b8..9dfeec75c95 100644 --- a/tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py +++ b/tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py @@ -19,11 +19,9 @@ from __future__ import print_function from absl.testing import parameterized -from tensorflow.python import tf2 from tensorflow.python.data.experimental.ops import testing from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops -from tensorflow.python.eager import context from tensorflow.python.framework import combinations from tensorflow.python.framework import errors from tensorflow.python.platform import test @@ -34,11 +32,7 @@ class ShuffleAndRepeatFusionTest(test_base.DatasetTestBase, @combinations.generate(test_base.default_test_combinations()) def testShuffleAndRepeatFusion(self): - if tf2.enabled() and context.executing_eagerly(): - expected = "Shuffle" - else: - expected = "ShuffleAndRepeat" - + expected = "ShuffleAndRepeat" dataset = dataset_ops.Dataset.range(10).apply( testing.assert_next([expected])).shuffle(10).repeat(2) options = dataset_ops.Options() diff 
--git a/tensorflow/python/data/ops/dataset_ops.py b/tensorflow/python/data/ops/dataset_ops.py index 8c6d1f8d454..d41aeb22d87 100644 --- a/tensorflow/python/data/ops/dataset_ops.py +++ b/tensorflow/python/data/ops/dataset_ops.py @@ -1526,7 +1526,7 @@ class DatasetV2(tracking_base.Trackable, composite_tensor.CompositeTensor): >>> result = dataset.map(lambda x: x + 1) >>> # Each element is a tuple containing two `tf.Tensor` objects. - >>> elements = [(1, "foo"), (2, "bar"), (3, "baz)")] + >>> elements = [(1, "foo"), (2, "bar"), (3, "baz")] >>> dataset = tf.data.Dataset.from_generator( ... lambda: elements, (tf.int32, tf.string)) >>> # `map_func` takes two arguments of type `tf.Tensor`. This function @@ -1578,7 +1578,7 @@ name=None)) Note that irrespective of the context in which `map_func` is defined (eager vs. graph), tf.data traces the function and executes it as a graph. To use - Python code inside of the function you have two options: + Python code inside of the function you have a few options: 1) Rely on AutoGraph to convert Python code into an equivalent graph computation. 
The downside of this approach is that AutoGraph can convert diff --git a/tensorflow/python/distribute/BUILD b/tensorflow/python/distribute/BUILD index 04adc0185ac..4df2088e02d 100644 --- a/tensorflow/python/distribute/BUILD +++ b/tensorflow/python/distribute/BUILD @@ -1517,6 +1517,7 @@ cuda_py_test( tags = [ "multi_and_single_gpu", ], + # b/155301154 broken with XLA:GPU xla_enable_strict_auto_jit = True, deps = [ ":collective_all_reduce_strategy", diff --git a/tensorflow/python/distribute/input_lib.py b/tensorflow/python/distribute/input_lib.py index 0bc183649e6..9defb75c703 100644 --- a/tensorflow/python/distribute/input_lib.py +++ b/tensorflow/python/distribute/input_lib.py @@ -463,23 +463,6 @@ class DistributedIteratorSpec(type_spec.TypeSpec): raise ValueError("Deserialization is currently unsupported for " "DistributedIteratorSpec.") - @staticmethod - def _is_compatible(a, b): - """Returns true if the given type serializations compatible.""" - if type(a) is not type(b): - return False - if isinstance(a, tuple): - return (len(a) == len(b) and - all(DistributedIteratorSpec._is_compatible(x, y) for (x, y) in - zip(a, b))) - if isinstance(a, dict): - return (len(a) == len(b) and sorted(a.keys()) == sorted(b.keys()) and all( - DistributedIteratorSpec._is_compatible(a[k], b[k]) for k in a.keys())) - if isinstance(a, (type_spec.TypeSpec, tensor_shape.TensorShape, - dtypes.DType)): - return a.is_compatible_with(b) - return a == b - # Overriding this method so that we can merge and reconstruct the spec object def most_specific_compatible_type(self, other): """Returns the most specific TypeSpec compatible with `self` and `other`. 
@@ -495,17 +478,16 @@ class DistributedIteratorSpec(type_spec.TypeSpec): if type(self) is not type(other): raise ValueError("No TypeSpec is compatible with both %s and %s" % (self, other)) - if not self._is_compatible(self._input_workers.serialize(), - other._input_workers.serialize()): + if self._input_workers.serialize() != other._input_workers.serialize(): raise ValueError("_input_workers is not compatible with both %s " "and %s" % (self, other)) - if self._element_spec != other._element_spec: - raise ValueError("_element_spec is not compatible with both %s " - "and %s" % (self, other)) - if id(self._strategy) != id(other._strategy): + if self._strategy is not other._strategy: raise ValueError("tf.distribute strategy is not compatible with both %s " "and %s" % (self, other)) - return DistributedIteratorSpec(self._input_workers, self._element_spec, + element_spec = nest.map_structure( + lambda a, b: a.most_specific_compatible_type(b), self._element_spec, + other._element_spec) + return DistributedIteratorSpec(self._input_workers, element_spec, self._strategy) @property diff --git a/tensorflow/python/distribute/input_lib_type_spec_test.py b/tensorflow/python/distribute/input_lib_type_spec_test.py index 0671875b06d..53bcc576b24 100644 --- a/tensorflow/python/distribute/input_lib_type_spec_test.py +++ b/tensorflow/python/distribute/input_lib_type_spec_test.py @@ -34,6 +34,7 @@ from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec +from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_tensor as ragged_tensor_lib @@ -206,6 +207,40 @@ class InputTypeSpecTest(test.TestCase, parameterized.TestCase): for x in dist_dataset: process_inputs(x) + @combinations.generate( + combinations.combine( + mode=["eager"], + input_type=["dataset"], + distribution=[ + 
strategy_combinations.mirrored_strategy_with_gpu_and_cpu, + strategy_combinations.tpu_strategy, + ], + enable_get_next_as_optional=[True, False])) + def testMostSpecificCompatibleType(self, input_type, distribution, + enable_get_next_as_optional): + if not tf2.enabled(): + self.skipTest("DistributedIterator has CompositeTensor support in " + "TF 2 only.") + distribution.extended.experimental_enable_get_next_as_optional = ( + enable_get_next_as_optional) + + ds1 = dataset_ops.DatasetV2.range(10).batch(2).batch(5) + ds2 = dataset_ops.DatasetV2.from_tensors( + array_ops.zeros([5, 2], dtypes.int64)) + dist_ds1 = distribution.experimental_distribute_dataset(ds1) + dist_ds2 = distribution.experimental_distribute_dataset(ds2) + + with distribution.scope(): + iter1 = iter(dist_ds1) + iter2 = iter(dist_ds2) + + spec1 = iter1._type_spec # Wrapped TensorSpec has shape [None, None] + spec2 = iter2._type_spec # Wrapped TensorSpec has shape [None, 2] + + self.assertNotEqual(spec1, spec2) + self.assertEqual(spec1, spec1.most_specific_compatible_type(spec2)) + self.assertEqual(spec1, spec2.most_specific_compatible_type(spec1)) + class RaggedTensorDistributedIteratorTest(test.TestCase, parameterized.TestCase): diff --git a/tensorflow/python/eager/backprop.py b/tensorflow/python/eager/backprop.py index 48a3301a3db..3a823bb908e 100644 --- a/tensorflow/python/eager/backprop.py +++ b/tensorflow/python/eager/backprop.py @@ -86,13 +86,13 @@ def make_attr(attr_type, value): # from integer value to class. 
if attr_type == int(pywrap_tfe.TF_ATTR_TYPE): return dtypes.as_dtype(value) - elif attr_type == [int(pywrap_tfe.TF_ATTR_TYPE)]: + if attr_type == [int(pywrap_tfe.TF_ATTR_TYPE)]: return [dtypes.as_dtype(v) for v in value] - elif attr_type == int(pywrap_tfe.TF_ATTR_SHAPE): + if attr_type == int(pywrap_tfe.TF_ATTR_SHAPE): return tensor_shape.as_shape(value).as_proto() - elif attr_type == [int(pywrap_tfe.TF_ATTR_SHAPE)]: + if attr_type == [int(pywrap_tfe.TF_ATTR_SHAPE)]: return [tensor_shape.as_shape(v).as_proto() for v in value] - elif isinstance(value, str): + if isinstance(value, str): return value.encode() return value @@ -149,10 +149,9 @@ def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs, # This does not work with v1 TensorArrays. if ops.executing_eagerly_outside_functions( ) or control_flow_util.EnableControlFlowV2(ops.get_default_graph()): + gradient_name_scope = "gradient_tape/" if forward_pass_name_scope: - gradient_name_scope = "gradient_tape/" + forward_pass_name_scope + "/" - else: - gradient_name_scope = "gradient_tape/" + gradient_name_scope += forward_pass_name_scope + "/" with ops.name_scope(gradient_name_scope): return grad_fn(mock_op, *out_grads) else: @@ -588,28 +587,27 @@ def aggregate_indexed_slices_gradients(grads): """Aggregates gradients containing `IndexedSlices`s.""" if len(grads) < 1: return None - elif len(grads) == 1: + if len(grads) == 1: return grads[0] - else: - grads = [g for g in grads if g is not None] - # If any gradient is a `Tensor`, sum them up and return a dense tensor - # object. - if any(isinstance(g, ops.Tensor) for g in grads): - return math_ops.add_n(grads) + grads = [g for g in grads if g is not None] + # If any gradient is a `Tensor`, sum them up and return a dense tensor + # object. + if any(isinstance(g, ops.Tensor) for g in grads): + return math_ops.add_n(grads) - # The following `_as_indexed_slices_list` casts ids of IndexedSlices into - # int64. 
It is to make sure the inputs of `concat` all have same the data - # type. - grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access + # The following `_as_indexed_slices_list` casts ids of IndexedSlices into + # int64. It is to make sure the inputs of `concat` all have same the data + # type. + grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access - grads = [flatten_nested_indexed_slices(x) for x in grads] - # Form IndexedSlices out of the concatenated values and indices. - concat_grad = ops.IndexedSlices( - array_ops.concat([x.values for x in grads], axis=0), - array_ops.concat([x.indices for x in grads], axis=0), - grads[0].dense_shape) + grads = [flatten_nested_indexed_slices(x) for x in grads] + # Form IndexedSlices out of the concatenated values and indices. + concat_grad = ops.IndexedSlices( + array_ops.concat([x.values for x in grads], axis=0), + array_ops.concat([x.indices for x in grads], axis=0), + grads[0].dense_shape) - return concat_grad + return concat_grad def _aggregate_grads(gradients): diff --git a/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py b/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py index 8d8441e76aa..9d049a6d59d 100644 --- a/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py +++ b/tensorflow/python/eager/benchmarks/resnet50/resnet50_test.py @@ -104,11 +104,12 @@ class ResNet50Test(tf.test.TestCase): context.async_wait() self.assertEqual((2, 1000), output.shape) - @test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt('b/155260334') def test_apply(self): self._apply(defun=False) - @test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt( + 'TFE_ContextGetExecutorForThread not implemented for tfrt') def test_apply_async(self): self._apply(defun=False, execution_mode=context.ASYNC) @@ -120,7 +121,7 @@ class ResNet50Test(tf.test.TestCase): def test_apply_with_defun_async(self): self._apply(defun=True, 
execution_mode=context.ASYNC) - @test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt('b/155260334') def test_apply_no_top(self): device, data_format = resnet50_test_util.device_and_data_format() model = resnet50.ResNet50(data_format, include_top=False) @@ -131,7 +132,7 @@ class ResNet50Test(tf.test.TestCase): if data_format == 'channels_first' else (2, 1, 1, 2048)) self.assertEqual(output_shape, output.shape) - @test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt('b/155260334') def test_apply_with_pooling(self): device, data_format = resnet50_test_util.device_and_data_format() model = resnet50.ResNet50(data_format, include_top=False, pooling='avg') @@ -140,7 +141,7 @@ class ResNet50Test(tf.test.TestCase): output = model(images, training=False) self.assertEqual((2, 2048), output.shape) - @test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt('b/155260334') def test_apply_no_average_pooling(self): device, data_format = resnet50_test_util.device_and_data_format() model = resnet50.ResNet50( @@ -152,7 +153,7 @@ class ResNet50Test(tf.test.TestCase): (2, 7, 7, 2048)) self.assertEqual(output_shape, output.shape) - @test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt('b/155260334') def test_apply_block3_strides(self): device, data_format = resnet50_test_util.device_and_data_format() model = resnet50.ResNet50( @@ -164,7 +165,7 @@ class ResNet50Test(tf.test.TestCase): (2, 1, 1, 2048)) self.assertEqual(output_shape, output.shape) - @test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt('b/155260334') def test_apply_retrieve_intermediates(self): device, data_format = resnet50_test_util.device_and_data_format() model = resnet50.ResNet50( @@ -219,15 +220,15 @@ class ResNet50Test(tf.test.TestCase): self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'loss') - @test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt('b/155260334') def test_train(self): self._test_train() - 
@test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt('b/155260334') def test_train_async(self): self._test_train(execution_mode=context.ASYNC) - @test_util.disable_tfrt('b/154858769') + @test_util.disable_tfrt('b/155260334') def test_no_garbage(self): device, data_format = resnet50_test_util.device_and_data_format() model = resnet50.ResNet50(data_format) diff --git a/tensorflow/python/eager/context.py b/tensorflow/python/eager/context.py index eb928614817..182b8478420 100644 --- a/tensorflow/python/eager/context.py +++ b/tensorflow/python/eager/context.py @@ -1793,7 +1793,7 @@ def executing_eagerly(): cases. * Executing inside `tf.function`, unless under `tf.init_scope` or - `tf.config.experimental_run_functions_eagerly(True)` is previously called. + `tf.config.run_functions_eagerly(True)` is previously called. * Executing inside a transformation function for `tf.dataset`. * `tf.compat.v1.disable_eager_execution()` is called. @@ -1815,8 +1815,8 @@ def executing_eagerly(): Inside `tf.function` after - `tf.config.experimental_run_functions_eagerly(True)` is called: - >>> tf.config.experimental_run_functions_eagerly(True) + `tf.config.run_functions_eagerly(True)` is called: + >>> tf.config.run_functions_eagerly(True) >>> @tf.function ... def fn(): ... with tf.init_scope(): @@ -1825,7 +1825,7 @@ def executing_eagerly(): >>> fn() True True - >>> tf.config.experimental_run_functions_eagerly(False) + >>> tf.config.run_functions_eagerly(False) Inside a transformation function for `tf.dataset`: @@ -1858,7 +1858,7 @@ def executing_eagerly_v1(): this API might return `False` in the following use cases. * Executing inside `tf.function`, unless under `tf.init_scope` or - `tf.config.experimental_run_functions_eagerly(True)` is previously called. + `tf.config.run_functions_eagerly(True)` is previously called. * Executing inside a transformation function for `tf.dataset`. * `tf.compat.v1.disable_eager_execution()` is called. 
@@ -1881,9 +1881,9 @@ def executing_eagerly_v1(): False Inside `tf.function` - after `tf.config.experimental_run_functions_eagerly(True)` is called: + after `tf.config.run_functions_eagerly(True)` is called: - >>> tf.config.experimental_run_functions_eagerly(True) + >>> tf.config.run_functions_eagerly(True) >>> @tf.function ... def fn(): ... with tf.init_scope(): @@ -1892,7 +1892,7 @@ def executing_eagerly_v1(): >>> fn() True True - >>> tf.config.experimental_run_functions_eagerly(False) + >>> tf.config.run_functions_eagerly(False) Inside a transformation function for `tf.dataset`: diff --git a/tensorflow/python/eager/def_function.py b/tensorflow/python/eager/def_function.py index e0273324a52..5fbf6c93ca4 100644 --- a/tensorflow/python/eager/def_function.py +++ b/tensorflow/python/eager/def_function.py @@ -41,6 +41,7 @@ from tensorflow.python.ops import resource_variable_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.profiler import traceme from tensorflow.python.training.tracking import base as trackable +from tensorflow.python.util import deprecation from tensorflow.python.util import nest from tensorflow.python.util import object_identity from tensorflow.python.util import tf_decorator @@ -305,8 +306,11 @@ class UnliftedInitializerVariable(resource_variable_ops.UninitializedVariable): RUN_FUNCTIONS_EAGERLY = False +@deprecation.deprecated( + None, + "Use tf.config.run_functions_eagerly instead of the experimental version.") @tf_export("config.experimental_run_functions_eagerly") -def run_functions_eagerly(run_eagerly): +def experimental_run_functions_eagerly(run_eagerly): """Enables / disables eager execution of `tf.function`s. Calling `tf.config.experimental_run_functions_eagerly(True)` will make all @@ -345,6 +349,60 @@ def run_functions_eagerly(run_eagerly): Calling `tf.config.experimental_run_functions_eagerly(False)` will undo this behavior. 
+ Note: This flag has no effect on functions passed into tf.data transformations + as arguments. tf.data functions are never executed eagerly and are always + executed as a compiled Tensorflow Graph. + + Args: + run_eagerly: Boolean. Whether to run functions eagerly. + """ + return run_functions_eagerly(run_eagerly) + + +@tf_export("config.run_functions_eagerly") +def run_functions_eagerly(run_eagerly): + """Enables / disables eager execution of `tf.function`s. + + Calling `tf.config.run_functions_eagerly(True)` will make all + invocations of `tf.function` run eagerly instead of running as a traced graph + function. + + This can be useful for debugging or profiling. For example, let's say you + implemented a simple iterative sqrt function, and you want to collect the + intermediate values and plot the convergence. Appending the values to a list + in `@tf.function` normally wouldn't work since it will just record the Tensors + being traced, not the values. Instead, you can do the following. + + >>> ys = [] + >>> + >>> @tf.function + ... def sqrt(x): + ... y = x / 2 + ... d = y + ... for _ in range(10): + ... d /= 2 + ... if y * y < x: + ... y += d + ... else: + ... y -= d + ... ys.append(y.numpy()) + ... return y + >>> + >>> tf.config.run_functions_eagerly(True) + >>> sqrt(tf.constant(2.)) + + >>> ys + [1.5, 1.25, 1.375, 1.4375, 1.40625, 1.421875, 1.4140625, 1.4179688, 1.4160156, + 1.4150391] + >>> tf.config.run_functions_eagerly(False) + + Calling `tf.config.run_functions_eagerly(False)` will undo this + behavior. + + Note: This flag has no effect on functions passed into tf.data transformations + as arguments. tf.data functions are never executed eagerly and are always + executed as a compiled Tensorflow Graph. + Args: run_eagerly: Boolean. Whether to run functions eagerly. 
""" @@ -352,9 +410,18 @@ def run_functions_eagerly(run_eagerly): RUN_FUNCTIONS_EAGERLY = bool(run_eagerly) +@deprecation.deprecated( + None, + "Use tf.config.functions_run_eagerly instead of the experimental version.") @tf_export("config.experimental_functions_run_eagerly") -def functions_run_eagerly(): +def experimental_functions_run_eagerly(): """Returns the value of the `experimental_run_functions_eagerly` setting.""" + return functions_run_eagerly() + + +@tf_export("config.functions_run_eagerly") +def functions_run_eagerly(): + """Returns the value of the `run_functions_eagerly` setting.""" return RUN_FUNCTIONS_EAGERLY diff --git a/tensorflow/python/eager/pywrap_tensor.cc b/tensorflow/python/eager/pywrap_tensor.cc index 465da491fe1..a72f74b38b8 100644 --- a/tensorflow/python/eager/pywrap_tensor.cc +++ b/tensorflow/python/eager/pywrap_tensor.cc @@ -762,7 +762,11 @@ static PyTypeObject _EagerTensorType = { sizeof(EagerTensor), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)EagerTensor_dealloc, /* tp_dealloc */ +#if PY_VERSION_HEX < 0x03080000 nullptr, /* tp_print */ +#else + 0, /* tp_vectorcall_offset */ +#endif nullptr, /* tp_getattr */ nullptr, /* tp_setattr */ nullptr, /* tp_compare */ diff --git a/tensorflow/python/eager/pywrap_tfe_src.cc b/tensorflow/python/eager/pywrap_tfe_src.cc index 316f91ec88b..e82a72851ca 100644 --- a/tensorflow/python/eager/pywrap_tfe_src.cc +++ b/tensorflow/python/eager/pywrap_tfe_src.cc @@ -1504,22 +1504,26 @@ static PyTypeObject TFE_Py_Tape_Type = { sizeof(TFE_Py_Tape), /* tp_basicsize */ 0, /* tp_itemsize */ &TFE_Py_Tape_Delete, /* tp_dealloc */ - nullptr, /* tp_print */ - nullptr, /* tp_getattr */ - nullptr, /* tp_setattr */ - nullptr, /* tp_reserved */ - nullptr, /* tp_repr */ - nullptr, /* tp_as_number */ - nullptr, /* tp_as_sequence */ - nullptr, /* tp_as_mapping */ - nullptr, /* tp_hash */ - nullptr, /* tp_call */ - nullptr, /* tp_str */ - nullptr, /* tp_getattro */ - nullptr, /* tp_setattro */ - nullptr, /* tp_as_buffer 
*/ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - "TFE_Py_Tape objects", /* tp_doc */ +#if PY_VERSION_HEX < 0x03080000 + nullptr, /* tp_print */ +#else + 0, /* tp_vectorcall_offset */ +#endif + nullptr, /* tp_getattr */ + nullptr, /* tp_setattr */ + nullptr, /* tp_reserved */ + nullptr, /* tp_repr */ + nullptr, /* tp_as_number */ + nullptr, /* tp_as_sequence */ + nullptr, /* tp_as_mapping */ + nullptr, /* tp_hash */ + nullptr, /* tp_call */ + nullptr, /* tp_str */ + nullptr, /* tp_getattro */ + nullptr, /* tp_setattro */ + nullptr, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + "TFE_Py_Tape objects", /* tp_doc */ }; typedef struct { @@ -1542,22 +1546,26 @@ static PyTypeObject TFE_Py_ForwardAccumulator_Type = { sizeof(TFE_Py_ForwardAccumulator), /* tp_basicsize */ 0, /* tp_itemsize */ &TFE_Py_ForwardAccumulatorDelete, /* tp_dealloc */ - nullptr, /* tp_print */ - nullptr, /* tp_getattr */ - nullptr, /* tp_setattr */ - nullptr, /* tp_reserved */ - nullptr, /* tp_repr */ - nullptr, /* tp_as_number */ - nullptr, /* tp_as_sequence */ - nullptr, /* tp_as_mapping */ - nullptr, /* tp_hash */ - nullptr, /* tp_call */ - nullptr, /* tp_str */ - nullptr, /* tp_getattro */ - nullptr, /* tp_setattro */ - nullptr, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - "TFE_Py_ForwardAccumulator objects", /* tp_doc */ +#if PY_VERSION_HEX < 0x03080000 + nullptr, /* tp_print */ +#else + 0, /* tp_vectorcall_offset */ +#endif + nullptr, /* tp_getattr */ + nullptr, /* tp_setattr */ + nullptr, /* tp_reserved */ + nullptr, /* tp_repr */ + nullptr, /* tp_as_number */ + nullptr, /* tp_as_sequence */ + nullptr, /* tp_as_mapping */ + nullptr, /* tp_hash */ + nullptr, /* tp_call */ + nullptr, /* tp_str */ + nullptr, /* tp_getattro */ + nullptr, /* tp_setattro */ + nullptr, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + "TFE_Py_ForwardAccumulator objects", /* tp_doc */ }; typedef struct { @@ -1577,12 +1585,16 @@ static PyTypeObject TFE_Py_VariableWatcher_Type = { 
sizeof(TFE_Py_VariableWatcher), /* tp_basicsize */ 0, /* tp_itemsize */ &TFE_Py_VariableWatcher_Delete, /* tp_dealloc */ - nullptr, /* tp_print */ - nullptr, /* tp_getattr */ - nullptr, /* tp_setattr */ - nullptr, /* tp_reserved */ - nullptr, /* tp_repr */ - nullptr, /* tp_as_number */ +#if PY_VERSION_HEX < 0x03080000 + nullptr, /* tp_print */ +#else + 0, /* tp_vectorcall_offset */ +#endif + nullptr, /* tp_getattr */ + nullptr, /* tp_setattr */ + nullptr, /* tp_reserved */ + nullptr, /* tp_repr */ + nullptr, /* tp_as_number */ nullptr, /* tp_as_sequence */ nullptr, /* tp_as_mapping */ nullptr, /* tp_hash */ diff --git a/tensorflow/python/framework/ops_test.py b/tensorflow/python/framework/ops_test.py index 59ac5b9a280..b97c0d03395 100644 --- a/tensorflow/python/framework/ops_test.py +++ b/tensorflow/python/framework/ops_test.py @@ -3432,7 +3432,7 @@ ops.register_tensor_conversion_function( class CustomConvertToCompositeTensorTest(test_util.TensorFlowTestCase): - @test_util.disable_tfrt("b/154858769") + @test_util.disable_tfrt("TODO(kkb): This makes Kokoro tests fail.") def testCompositeTensorConversion(self): """Tests that a user can register a CompositeTensor converter.""" x = _MyTuple((1, [2., 3.], [[4, 5], [6, 7]])) diff --git a/tensorflow/python/keras/engine/base_layer.py b/tensorflow/python/keras/engine/base_layer.py index 2fc0513341d..4085ae6e0da 100644 --- a/tensorflow/python/keras/engine/base_layer.py +++ b/tensorflow/python/keras/engine/base_layer.py @@ -1213,7 +1213,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): collected_losses.append(loss_tensor) return collected_losses - def add_loss(self, losses, inputs=None): + def add_loss(self, losses, **kwargs): """Add loss tensor(s), potentially dependent on layer inputs. 
Some losses (for instance, activity regularization losses) may be dependent @@ -1230,7 +1230,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): ```python class MyLayer(tf.keras.layers.Layer): def call(self, inputs): - self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True) + self.add_loss(tf.abs(tf.reduce_mean(inputs))) return inputs ``` @@ -1270,14 +1270,13 @@ class Layer(module.Module, version_utils.LayerVersionSelector): Arguments: losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses may also be zero-argument callables which create a loss tensor. - inputs: Ignored when executing eagerly. If anything other than None is - passed, it signals the losses are conditional on some of the layer's - inputs, and thus they should only be run where these inputs are - available. This is the case for activity regularization losses, for - instance. If `None` is passed, the losses are assumed - to be unconditional, and will apply across all dataflows of the layer - (e.g. weight regularization losses). + **kwargs: Additional keyword arguments for backward compatibility. + Accepted values: + inputs - Deprecated, will be automatically inferred. 
""" + kwargs.pop('inputs', None) + if kwargs: + raise TypeError('Unknown keyword arguments: %s' % (kwargs.keys(),)) def _tag_callable(loss): """Tags callable loss tensor as `_unconditional_loss`.""" diff --git a/tensorflow/python/keras/engine/base_layer_utils.py b/tensorflow/python/keras/engine/base_layer_utils.py index 30ac17d8270..34e907f835f 100644 --- a/tensorflow/python/keras/engine/base_layer_utils.py +++ b/tensorflow/python/keras/engine/base_layer_utils.py @@ -37,6 +37,7 @@ from tensorflow.python.ops import variables as tf_variables from tensorflow.python.training.tracking import base as tracking from tensorflow.python.util import nest from tensorflow.python.util import tf_contextlib +from tensorflow.python.util.tf_export import keras_export _call_context = threading.local() @@ -660,33 +661,42 @@ def mark_as_return(outputs, acd): V2_DTYPE_BEHAVIOR = None +@keras_export(v1=['keras.layers.enable_v2_dtype_behavior']) def enable_v2_dtype_behavior(): """Enable the V2 dtype behavior for Keras layers. - By default, the V2 dtype behavior is enabled in TensorFlow 2. + By default, the V2 dtype behavior is enabled in TensorFlow 2, so this function + is only useful if `tf.compat.v1.disable_v2_behavior` has been called. Since + mixed precision requires V2 dtype behavior to be enabled, this function allows + you to use mixed precision in Keras layers if `disable_v2_behavior` has been + called. When enabled, the dtype of Keras layers defaults to floatx (which is typically float32) instead of None. In addition, layers will automatically cast floating-point inputs to the layer's dtype. - For example, once enabled, the following block will run a Conv2D layer - in float32: - - ```python - x = tf.ones((4, 4, 4, 4), dtype='float64') - layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2) - print(layer.dtype) # Float32 when enabled. None when disabled. - # When enabled, will cast inputs to the layer's dtype, which is float32. 
When - # disabled, will do no casting, so the layer is done in float64. - y = layer(x) - ``` + >>> tf.compat.v1.keras.layers.disable_v2_dtype_behavior() + >>> x = tf.ones((4, 4, 4, 4), dtype='float64') + >>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2) + >>> print(layer.dtype) # None since V2 behavior is disabled + None + >>> y = layer(x) # Doesn't cast inputs since V2 dtype behavior is disabled + >>> print(y.dtype.name) + float64 + >>> tf.compat.v1.keras.layers.enable_v2_dtype_behavior() + >>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2) + >>> print(layer.dtype) # float32 since V2 dtype behavior is enabled + float32 + >>> y = layer(x) # Layer casts inputs since V2 dtype behavior is enabled + >>> print(y.dtype.name) + float32 A layer author can opt-out their layer from the automatic input casting by passing `autocast=False` to the base Layer's constructor. This disables the autocasting part of the V2 behavior for that layer, but not the defaulting to floatx part of the V2 behavior. - When a global `tf.keras.mixed_precision.experimental.Policy` is set, the + When a global `tf.keras.mixed_precision.experimental.Policy` is set, a Keras layer's dtype will default to the global policy instead of floatx. Layers will automatically cast inputs to the policy's compute_dtype. """ @@ -694,12 +704,11 @@ def enable_v2_dtype_behavior(): V2_DTYPE_BEHAVIOR = True +@keras_export(v1=['keras.layers.disable_v2_dtype_behavior']) def disable_v2_dtype_behavior(): """Disables the V2 dtype behavior for Keras layers. - See `enable_v2_dtype_behavior`. - - This function will be removed in the future. + See `tf.compat.v1.keras.layers.enable_v2_dtype_behavior`. 
""" global V2_DTYPE_BEHAVIOR V2_DTYPE_BEHAVIOR = False diff --git a/tensorflow/python/keras/engine/training_v1.py b/tensorflow/python/keras/engine/training_v1.py index 3553dea89fb..0a40ce3899b 100644 --- a/tensorflow/python/keras/engine/training_v1.py +++ b/tensorflow/python/keras/engine/training_v1.py @@ -534,7 +534,7 @@ class Model(training_lib.Model): 'is enabled.') if not self.dynamic: if self._run_eagerly is None: - # Respect `tf.config.experimental_run_functions_eagerly` unless + # Respect `tf.config.run_functions_eagerly` unless # `run_eagerly` was explicitly passed to `compile`. return def_function.RUN_FUNCTIONS_EAGERLY else: diff --git a/tensorflow/python/keras/layers/core.py b/tensorflow/python/keras/layers/core.py index b6dd3c3500b..00f6540a392 100644 --- a/tensorflow/python/keras/layers/core.py +++ b/tensorflow/python/keras/layers/core.py @@ -56,6 +56,7 @@ from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export +# pylint: disable=g-classes-have-attributes @keras_export('keras.layers.Masking') class Masking(Layer): """Masks a sequence by using a mask value to skip timesteps. @@ -1072,17 +1073,17 @@ class Dense(Layer): Example: - ```python - # as first layer in a sequential model: - model = Sequential() - model.add(Dense(32, input_shape=(16,))) - # now the model will take as input arrays of shape (*, 16) - # and output arrays of shape (*, 32) - - # after the first layer, you don't need to specify - # the size of the input anymore: - model.add(Dense(32)) - ``` + >>> # Create a `Sequential` model and add a Dense layer as the first layer. + >>> model = tf.keras.models.Sequential() + >>> model.add(tf.keras.Input(shape=(16,))) + >>> model.add(tf.keras.layers.Dense(32, activation='relu')) + >>> # Now the model will take as input arrays of shape (None, 16) + >>> # and output arrays of shape (None, 32). 
+ >>> # Note that after the first layer, you don't need to specify + >>> # the size of the input anymore: + >>> model.add(tf.keras.layers.Dense(32)) + >>> model.output_shape + (None, 32) Arguments: units: Positive integer, dimensionality of the output space. diff --git a/tensorflow/python/keras/layers/preprocessing/BUILD b/tensorflow/python/keras/layers/preprocessing/BUILD index 2a9971f2944..82ed3e4797f 100644 --- a/tensorflow/python/keras/layers/preprocessing/BUILD +++ b/tensorflow/python/keras/layers/preprocessing/BUILD @@ -394,17 +394,17 @@ tf_py_test( ], ) -tpu_py_test( - name = "normalization_tpu_test", - srcs = ["normalization_tpu_test.py"], - disable_experimental = True, +distribute_py_test( + name = "normalization_distribution_test", + srcs = ["normalization_distribution_test.py"], + main = "normalization_distribution_test.py", python_version = "PY3", tags = ["no_oss"], deps = [ ":normalization", - "//tensorflow/python/distribute:tpu_strategy", + "//tensorflow/python/distribute:combinations", + "//tensorflow/python/distribute:strategy_combinations", "//tensorflow/python/keras", - "//tensorflow/python/keras/distribute:tpu_strategy_test_utils", ], ) diff --git a/tensorflow/python/keras/layers/preprocessing/normalization.py b/tensorflow/python/keras/layers/preprocessing/normalization.py index 5a0b8990486..b087a2101c7 100644 --- a/tensorflow/python/keras/layers/preprocessing/normalization.py +++ b/tensorflow/python/keras/layers/preprocessing/normalization.py @@ -101,7 +101,7 @@ class Normalization(CombinerPreprocessingLayer): self.count = self._add_state_variable( name=_COUNT_NAME, shape=(), - dtype=dtypes.int32, + dtype=dtypes.int64, initializer=init_ops.zeros_initializer) super(Normalization, self).build(input_shape) diff --git a/tensorflow/python/keras/layers/preprocessing/normalization_distribution_test.py b/tensorflow/python/keras/layers/preprocessing/normalization_distribution_test.py new file mode 100644 index 00000000000..f22556ef723 --- /dev/null +++ 
b/tensorflow/python/keras/layers/preprocessing/normalization_distribution_test.py @@ -0,0 +1,136 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for keras.layers.preprocessing.normalization.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensorflow.python import keras +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.distribute import combinations +from tensorflow.python.distribute import strategy_combinations +from tensorflow.python.eager import context +from tensorflow.python.keras import keras_parameterized +from tensorflow.python.keras.layers.preprocessing import normalization +from tensorflow.python.keras.layers.preprocessing import normalization_v1 +from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils +from tensorflow.python.platform import test + + +def get_layer_class(): + if context.executing_eagerly(): + return normalization.Normalization + else: + return normalization_v1.Normalization + + +def _get_layer_computation_test_cases(): + test_cases = ({ + "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32), + "axis": -1, + "test_data": np.array([[1.], [2.], [3.]], np.float32), + "expected": np.array([[-1.414214], [-.707107], [0]], 
np.float32), + "testcase_name": "2d_single_element" + }, { + "adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32), + "axis": None, + "test_data": np.array([[1.], [2.], [3.]], np.float32), + "expected": np.array([[-1.414214], [-.707107], [0]], np.float32), + "testcase_name": "2d_single_element_none_axis" + }, { + "adapt_data": np.array([[1., 2., 3., 4., 5.]], dtype=np.float32), + "axis": None, + "test_data": np.array([[1.], [2.], [3.]], np.float32), + "expected": np.array([[-1.414214], [-.707107], [0]], np.float32), + "testcase_name": "2d_single_element_none_axis_flat_data" + }, { + "adapt_data": + np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]], + np.float32), + "axis": + 1, + "test_data": + np.array([[[1., 2., 3.], [2., 3., 4.]], [[3., 4., 5.], [4., 5., 6.]]], + np.float32), + "expected": + np.array([[[-1.549193, -0.774597, 0.], [-1.549193, -0.774597, 0.]], + [[0., 0.774597, 1.549193], [0., 0.774597, 1.549193]]], + np.float32), + "testcase_name": + "3d_internal_axis" + }, { + "adapt_data": + np.array( + [[[1., 0., 3.], [2., 3., 4.]], [[3., -1., 5.], [4., 5., 8.]]], + np.float32), + "axis": (1, 2), + "test_data": + np.array( + [[[3., 1., -1.], [2., 5., 4.]], [[3., 0., 5.], [2., 5., 8.]]], + np.float32), + "expected": + np.array( + [[[1., 3., -5.], [-1., 1., -1.]], [[1., 1., 1.], [-1., 1., 1.]]], + np.float32), + "testcase_name": + "3d_multiple_axis" + }) + + crossed_test_cases = [] + # Cross above test cases with use_dataset in (True, False) + for use_dataset in (True, False): + for case in test_cases: + case = case.copy() + if use_dataset: + case["testcase_name"] = case["testcase_name"] + "_with_dataset" + case["use_dataset"] = use_dataset + crossed_test_cases.append(case) + + return crossed_test_cases + + +@combinations.generate( + combinations.times( + combinations.combine( + distribution=strategy_combinations.all_strategies, + mode=["eager", "graph"]), _get_layer_computation_test_cases())) +class 
NormalizationTest(keras_parameterized.TestCase, + preprocessing_test_utils.PreprocessingLayerTest): + + def test_layer_computation(self, distribution, adapt_data, axis, test_data, + use_dataset, expected): + input_shape = tuple([None for _ in range(test_data.ndim - 1)]) + if use_dataset: + # Keras APIs expect batched datasets + adapt_data = dataset_ops.Dataset.from_tensor_slices(adapt_data).batch( + test_data.shape[0] // 2) + test_data = dataset_ops.Dataset.from_tensor_slices(test_data).batch( + test_data.shape[0] // 2) + + with distribution.scope(): + input_data = keras.Input(shape=input_shape) + layer = get_layer_class()(axis=axis) + layer.adapt(adapt_data) + output = layer(input_data) + model = keras.Model(input_data, output) + output_data = model.predict(test_data) + self.assertAllClose(expected, output_data) + + +if __name__ == "__main__": + test.main() diff --git a/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py b/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py index 6ce3079788c..41591d3edbd 100644 --- a/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py +++ b/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py @@ -287,14 +287,14 @@ class AutoCastVariableTest(test.TestCase, parameterized.TestCase): self.evaluate(x.initializer) # outside of auto cast scope. - v1 = constant_op.constant(3.14, dtype=dtypes.float32) - v2 = constant_op.constant(3.14, dtype=dtypes.float16) + v1 = constant_op.constant(3., dtype=dtypes.float32) + v2 = constant_op.constant(3., dtype=dtypes.float16) def run_and_check(): # Assign float32 values - self.assertAllClose(3.14, self.evaluate(x.assign(v1))) - self.assertAllClose(3.14 * 2, self.evaluate(x.assign_add(v1))) - self.assertAllClose(3.14, self.evaluate(x.assign_sub(v1))) + self.assertAllClose(3., self.evaluate(x.assign(v1))) + self.assertAllClose(3. 
* 2, self.evaluate(x.assign_add(v1))) + self.assertAllClose(3., self.evaluate(x.assign_sub(v1))) # Attempt to assign float16 values with self.assertRaisesRegexp( @@ -312,23 +312,23 @@ class AutoCastVariableTest(test.TestCase, parameterized.TestCase): # Assign Python floats self.assertAllClose(0., self.evaluate(x.assign(0.))) - self.assertAllClose(3.14, self.evaluate(x.assign(3.14))) - self.assertAllClose(3.14 * 2, self.evaluate(x.assign_add(3.14))) - self.assertAllClose(3.14, self.evaluate(x.assign_sub(3.14))) + self.assertAllClose(3., self.evaluate(x.assign(3.))) + self.assertAllClose(3. * 2, self.evaluate(x.assign_add(3.))) + self.assertAllClose(3., self.evaluate(x.assign_sub(3.))) # Assign multiple times assign = x.assign(1.) self.assertAllClose(1., self.evaluate(assign)) self.assertAllClose(0., self.evaluate(assign.assign(0.))) - assign_add = x.assign_add(3.14) - self.assertAllClose(3.14, self.evaluate(assign_add)) - self.assertAllClose(3.14 * 3, - self.evaluate(x.assign_add(3.14).assign_add(3.14))) - self.assertAllClose(3.14 * 3, x) - assign_sub = x.assign_sub(3.14) - self.assertAllClose(3.14 * 2, self.evaluate(assign_sub)) + assign_add = x.assign_add(3.) + self.assertAllClose(3., self.evaluate(assign_add)) + self.assertAllClose(3. * 3, + self.evaluate(x.assign_add(3.).assign_add(3.))) + self.assertAllClose(3. * 3, x) + assign_sub = x.assign_sub(3.) + self.assertAllClose(3. * 2, self.evaluate(assign_sub)) self.assertAllClose(0., - self.evaluate(x.assign_sub(3.14).assign_sub(3.14))) + self.evaluate(x.assign_sub(3.).assign_sub(3.))) # Assign with read_value=False self.assertIsNone(self.evaluate(x.assign(1., read_value=False))) @@ -340,10 +340,10 @@ class AutoCastVariableTest(test.TestCase, parameterized.TestCase): # Use the tf.assign functions instead of the var.assign methods. 
self.assertAllClose(0., self.evaluate(state_ops.assign(x, 0.))) - self.assertAllClose(3.14, self.evaluate(state_ops.assign(x, 3.14))) - self.assertAllClose(3.14 * 2, - self.evaluate(state_ops.assign_add(x, 3.14))) - self.assertAllClose(3.14, self.evaluate(state_ops.assign_sub(x, 3.14))) + self.assertAllClose(3., self.evaluate(state_ops.assign(x, 3.))) + self.assertAllClose(3. * 2, + self.evaluate(state_ops.assign_add(x, 3.))) + self.assertAllClose(3., self.evaluate(state_ops.assign_sub(x, 3.))) run_and_check() # reset x diff --git a/tensorflow/python/keras/mixed_precision/experimental/policy.py b/tensorflow/python/keras/mixed_precision/experimental/policy.py index 77f9563e827..0b809e678a9 100644 --- a/tensorflow/python/keras/mixed_precision/experimental/policy.py +++ b/tensorflow/python/keras/mixed_precision/experimental/policy.py @@ -195,6 +195,10 @@ class Policy(object): Other arguments are not automatically casted for technical reasons, but this may change in a future minor release. + The casting only occurs in TensorFlow 2, but can be enabled if + `tf.compat.v1.disable_v2_behavior()` has been called with + `tf.compat.v1.keras.layers.enable_v2_dtype_behavior()`. + A layer subclass can prevent its inputs from being autocasted by passing `autocast=False` to the layer constructor. For example: @@ -547,7 +551,10 @@ def set_policy(policy): """ global _global_policy if not base_layer_utils.v2_dtype_behavior_enabled(): - raise ValueError('The global policy can only be set in TensorFlow 2') + raise ValueError('The global policy can only be set in TensorFlow 2 or if ' + 'V2 dtype behavior has been set. 
To enable V2 dtype ' + 'behavior, call ' + '"tf.compat.v1.keras.layers.enable_v2_dtype_behavior()"') if policy is not None and not isinstance(policy, Policy): policy = Policy(policy) is_mixed_policy = policy is not None and policy.should_cast_variables diff --git a/tensorflow/python/keras/saving/saved_model/revive_test.py b/tensorflow/python/keras/saving/saved_model/revive_test.py index 18ac923f6a9..4bd11460181 100644 --- a/tensorflow/python/keras/saving/saved_model/revive_test.py +++ b/tensorflow/python/keras/saving/saved_model/revive_test.py @@ -98,7 +98,7 @@ class CustomLayerNoConfig(keras.layers.Layer): constant_op.constant(1.0, shape=input_shape[1:]), name=self.name+'_c') def call(self, inputs): - self.add_loss(math_ops.reduce_sum(inputs), inputs) + self.add_loss(math_ops.reduce_sum(inputs), inputs=inputs) self.add_metric(self.sum_metric(inputs)) self.add_metric(inputs, aggregation='mean', name='mean') diff --git a/tensorflow/python/keras/saving/saved_model/saved_model_test.py b/tensorflow/python/keras/saving/saved_model/saved_model_test.py index 6c2037c4f4b..9cbe8607a54 100644 --- a/tensorflow/python/keras/saving/saved_model/saved_model_test.py +++ b/tensorflow/python/keras/saving/saved_model/saved_model_test.py @@ -86,7 +86,7 @@ class LayerWithLearningPhase(keras.engine.base_layer.Layer): class LayerWithLoss(keras.layers.Layer): def call(self, inputs): - self.add_loss(math_ops.reduce_sum(inputs), inputs) + self.add_loss(math_ops.reduce_sum(inputs), inputs=inputs) return inputs * 2 diff --git a/tensorflow/python/keras/saving/saved_model/utils.py b/tensorflow/python/keras/saving/saved_model/utils.py index fee35999b92..dedcea02a4f 100644 --- a/tensorflow/python/keras/saving/saved_model/utils.py +++ b/tensorflow/python/keras/saving/saved_model/utils.py @@ -69,7 +69,7 @@ def use_wrapped_call(layer, call_fn, default_training_value=None, inputs = args[inputs_arg_index] args = args[inputs_arg_index + 1:] outputs, losses = fn(inputs, *args, **kwargs) - 
layer.add_loss(losses, inputs) + layer.add_loss(losses, inputs=inputs) # TODO(kathywu): This is a temporary hack. When a network of layers is # revived from SavedModel, only the top-level layer will have losses. This diff --git a/tensorflow/python/keras/tests/add_loss_correctness_test.py b/tensorflow/python/keras/tests/add_loss_correctness_test.py index 26a799e0f83..323a2626c15 100644 --- a/tensorflow/python/keras/tests/add_loss_correctness_test.py +++ b/tensorflow/python/keras/tests/add_loss_correctness_test.py @@ -340,7 +340,7 @@ class TestAddLossCorrectness(keras_parameterized.TestCase): class LayerWithLoss(layers.Layer): def call(self, inputs): - self.add_loss(math_ops.reduce_sum(inputs), inputs) + self.add_loss(math_ops.reduce_sum(inputs), inputs=inputs) return inputs * 2 shared_layer = LayerWithLoss() @@ -357,7 +357,7 @@ class TestAddLossCorrectness(keras_parameterized.TestCase): class LayerWithLoss(layers.Layer): def call(self, inputs): - self.add_loss(math_ops.reduce_sum(inputs), inputs) + self.add_loss(math_ops.reduce_sum(inputs), inputs=inputs) return inputs * 2 class LayerWithNestedLayerWithLoss(layers.Layer): diff --git a/tensorflow/python/keras/utils/data_utils.py b/tensorflow/python/keras/utils/data_utils.py index 1cf27f8fb65..6c0122cdf72 100644 --- a/tensorflow/python/keras/utils/data_utils.py +++ b/tensorflow/python/keras/utils/data_utils.py @@ -48,6 +48,7 @@ from six.moves.urllib.request import urlopen from tensorflow.python.keras.utils.generic_utils import Progbar from tensorflow.python.keras.utils.io_utils import path_to_string from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import deprecation from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export @@ -552,6 +553,8 @@ def init_pool(seqs): _SHARED_SEQUENCES = seqs +@deprecation.deprecated('2020-06-07', 'Please manage pools using the standard ' + 'Python lib.') 
@keras_export('keras.experimental.terminate_keras_multiprocessing_pools') def terminate_keras_multiprocessing_pools(grace_period=0.1, use_sigkill=False): """Destroy Keras' multiprocessing pools to prevent deadlocks. diff --git a/tensorflow/python/lib/core/bfloat16.cc b/tensorflow/python/lib/core/bfloat16.cc index 42b248a7ddb..d165c47910b 100644 --- a/tensorflow/python/lib/core/bfloat16.cc +++ b/tensorflow/python/lib/core/bfloat16.cc @@ -313,11 +313,15 @@ PyTypeObject PyBfloat16_Type = { #else PyVarObject_HEAD_INIT(nullptr, 0) #endif - "bfloat16", // tp_name - sizeof(PyBfloat16), // tp_basicsize - 0, // tp_itemsize - nullptr, // tp_dealloc - 0, // tp_print + "bfloat16", // tp_name + sizeof(PyBfloat16), // tp_basicsize + 0, // tp_itemsize + nullptr, // tp_dealloc +#if PY_VERSION_HEX < 0x03080000 + nullptr, // tp_print +#else + 0, // tp_vectorcall_offset +#endif nullptr, // tp_getattr nullptr, // tp_setattr nullptr, // tp_compare / tp_reserved diff --git a/tensorflow/python/ops/embedding_ops.py b/tensorflow/python/ops/embedding_ops.py index cb41802a516..2fdae49b1f6 100644 --- a/tensorflow/python/ops/embedding_ops.py +++ b/tensorflow/python/ops/embedding_ops.py @@ -36,7 +36,6 @@ from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import variables from tensorflow.python.ops.ragged import ragged_functional_ops from tensorflow.python.ops.ragged import ragged_tensor -from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import tf_export @@ -258,7 +257,7 @@ def embedding_lookup( name=None, validate_indices=True, # pylint: disable=unused-argument max_norm=None): - """Looks up `ids` in a list of embedding tensors. + """Looks up embeddings for the given `ids` from a list of tensors. This function is used to perform parallel lookups on the list of tensors in `params`. 
It is a generalization of `tf.gather`, where `params` is @@ -329,36 +328,28 @@ def embedding_lookup( @tf_export("nn.embedding_lookup", v1=[]) def embedding_lookup_v2(params, ids, max_norm=None, name=None): - """Looks up `ids` in a list of embedding tensors. + """Looks up embeddings for the given `ids` from a list of tensors. - This function is used to perform parallel lookups on the list of - tensors in `params`. It is a generalization of - `tf.gather`, where `params` is - interpreted as a partitioning of a large embedding tensor. `params` may be - a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()` - with a - partitioner. + This function is used to perform parallel lookups on the list of tensors in + `params`. It is a generalization of `tf.gather`, where `params` is + interpreted as a partitioning of a large embedding tensor. - If `len(params) > 1`, each element `id` of `ids` is partitioned between - the elements of `params` according to the `partition_strategy`. - In all strategies, if the id space does not evenly divide the number of - partitions, each of the first `(max_id + 1) % len(params)` partitions will - be assigned one more id. - - The `partition_strategy` is always `"div"` currently. This means that we + If `len(params) > 1`, each element `id` of `ids` is partitioned between the + elements of `params` according to the "div" partition strategy, which means we assign ids to partitions in a contiguous manner. For instance, 13 ids are split across 5 partitions as: - `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]` + `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`. + + If the id space does not evenly divide the number of partitions, each of the + first `(max_id + 1) % len(params)` partitions will be assigned one more id. The results of the lookup are concatenated into a dense tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`. 
Args: params: A single tensor representing the complete embedding tensor, or a - list of P tensors all of same shape except for the first dimension, - representing sharded embedding tensors. Alternatively, a - `PartitionedVariable`, created by partitioning along dimension 0. Each - element must be appropriately sized for the 'div' `partition_strategy`. + list of tensors all of same shape except for the first dimension, + representing sharded embedding tensors following "div" partition strategy. ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked up in `params`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger @@ -368,6 +359,32 @@ def embedding_lookup_v2(params, ids, max_norm=None, name=None): Returns: A `Tensor` with the same type as the tensors in `params`. + For instance, if `params` is a 5x2 matrix: + + ```python + [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + ``` + + or a list of matrices: + + ```python + params[0]: [[1, 2], [3, 4]] + params[1]: [[5, 6], [7, 8]] + params[2]: [[9, 10]] + ``` + + and `ids` is: + + ```python + [0, 3, 4] + ``` + + The output will be a 3x2 matrix: + + ```python + [[1, 2], [7, 8], [9, 10]] + ``` + Raises: ValueError: If `params` is empty. """ @@ -382,19 +399,22 @@ def embedding_lookup_sparse(params, name=None, combiner=None, max_norm=None): - """Computes embeddings for the given ids and weights. + """Looks up embeddings for the given ids and weights from a list of tensors. This op assumes that there is at least one id for each row in the dense tensor represented by sp_ids (i.e. there are no rows with empty features), and that all the indices of sp_ids are in canonical row-major order. + `sp_ids` and `sp_weights` (if not None) are `SparseTensor`s with rank of 2. + Embeddings are always aggregated along the last dimension. + It also assumes that all id values lie in the range [0, p0), where p0 is the sum of the size of params along dimension 0. 
Args: params: A single tensor representing the complete embedding tensor, or a - list of P tensors all of same shape except for the first dimension, - representing sharded embedding tensors. Alternatively, a + list of tensors all of same shape except for the first dimension, + representing sharded embedding tensors. Alternatively, a `PartitionedVariable`, created by partitioning along dimension 0. Each element must be appropriately sized for the given `partition_strategy`. sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size @@ -410,7 +430,7 @@ def embedding_lookup_sparse(params, and "sum" are supported. "sum" computes the weighted sum of the embedding results for each row. "mean" is the weighted sum divided by the total weight. "sqrtn" is the weighted sum divided by the square root of the sum - of the squares of the weights. + of the squares of the weights. Defaults to `mean`. max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. @@ -426,11 +446,11 @@ def embedding_lookup_sparse(params, and - `shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]` + `shape(sp_ids) = shape(sp_weights) = [d0, d1]` then - `shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]`. + `shape(output) = [d0, p1, ..., pm]`. For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are @@ -455,8 +475,6 @@ def embedding_lookup_sparse(params, ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}. """ if combiner is None: - logging.warn("The default value of combiner will change from \"mean\" " - "to \"sqrtn\" after 2016/11/01.") combiner = "mean" if combiner not in ("mean", "sqrtn", "sum"): raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'") @@ -562,21 +580,31 @@ def embedding_lookup_sparse_v2(params, combiner=None, max_norm=None, name=None): - """Computes embeddings for the given ids and weights. + """Looks up embeddings for the given ids and weights from a list of tensors.
This op assumes that there is at least one id for each row in the dense tensor represented by sp_ids (i.e. there are no rows with empty features), and that all the indices of sp_ids are in canonical row-major order. + `sp_ids` and `sp_weights` (if not None) are `SparseTensor`s with rank of 2. + Embeddings are always aggregated along the last dimension. + It also assumes that all id values lie in the range [0, p0), where p0 is the sum of the size of params along dimension 0. + If `len(params) > 1`, each element of `sp_ids` is partitioned between the + elements of `params` according to the "div" partition strategy, which means we + assign ids to partitions in a contiguous manner. For instance, 13 ids are + split across 5 partitions as: + `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`. + + If the id space does not evenly divide the number of partitions, each of the + first `(max_id + 1) % len(params)` partitions will be assigned one more id. + Args: params: A single tensor representing the complete embedding tensor, or a - list of P tensors all of same shape except for the first dimension, - representing sharded embedding tensors. Alternatively, a - `PartitionedVariable`, created by partitioning along dimension 0. Each - element must be appropriately sized for ``"div"`` `partition_strategy`. + list of tensors all of same shape except for the first dimension, + representing sharded embedding tensors following "div" partition strategy. sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size and M is arbitrary. sp_weights: either a `SparseTensor` of float / double weights, or `None` to @@ -586,7 +614,7 @@ def embedding_lookup_sparse_v2(params, and "sum" are supported. "sum" computes the weighted sum of the embedding results for each row. "mean" is the weighted sum divided by the total weight. "sqrtn" is the weighted sum divided by the square root of the sum - of the squares of the weights. + of the squares of the weights. Defaults to `mean`. 
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger than this value, before combining. name: Optional name for the op. @@ -603,11 +631,11 @@ def embedding_lookup_sparse_v2(params, and - `shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]` + `shape(sp_ids) = shape(sp_weights) = [d0, d1]` then - `shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]`. + `shape(output) = [d0, p1, ..., pm]`. For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are @@ -647,10 +675,7 @@ def safe_embedding_lookup_sparse_v2(embedding_weights, The partitioned embedding in `embedding_weights` must all be the same shape except for the first dimension. The first dimension is allowed to vary as the - vocabulary size is not necessarily a multiple of `P`. `embedding_weights` - may be a `PartitionedVariable` as returned by using - `tf.compat.v1.get_variable()` with a - partitioner. + vocabulary size is not necessarily a multiple of num of shards. Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs with non-positive weight. For an entry with no features, the embedding vector @@ -659,16 +684,21 @@ def safe_embedding_lookup_sparse_v2(embedding_weights, The ids and weights may be multi-dimensional. Embeddings are always aggregated along the last dimension. - Note: when doing embedding lookup on `embedding_weights`, "div" partition - strategy will be used. Support for other partition strategy will be added - later. + If `len(embedding_weights) > 1`, each element `id` of `ids` is partitioned + between the elements of `embedding_weights` according to the "div" partition + strategy, which means we assign ids to partitions in a contiguous manner. For + instance, 13 ids are split across 5 partitions as: + `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`. + + If the id space does not evenly divide the number of partitions, each of the + first `(max_id + 1) % len(embedding_weights)` partitions will be assigned one + more id. 
Args: - embedding_weights: A list of `P` float `Tensor`s or values representing - partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable` - created by partitioning along dimension 0. The total unpartitioned shape - should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size - and `e_1, ..., e_m` are the embedding dimensions. + embedding_weights: A single tensor representing the complete embedding + tensor, or a list of tensors all of same shape except for the first + dimension, representing sharded embedding tensors following "div" + partition strategy. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing @@ -677,13 +707,48 @@ def safe_embedding_lookup_sparse_v2(embedding_weights, combiner: A string specifying how to combine embedding results for each entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. - default_id: The id to use for an entry with no features. + default_id: The id to use for an entry with no features. Defaults to + 0-vector. max_norm: If not `None`, all embeddings are l2-normalized to max_norm before combining. name: A name for this operation (optional). Returns: - Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`. + A dense tensor representing the combined embeddings for the + sparse ids. For each row in the dense tensor represented by `sparse_ids`, + the op looks up the embeddings for all ids in that row, multiplies them by + the corresponding weight, and combines these embeddings as specified. + + In other words, if + + `shape(combined embedding_weights) = [p0, p1, ..., pm]` + + and + + `shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]` + + then + + `shape(output) = [d0, d1, ... dn-1, p1, ..., pm]`. 
+ + For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are + + ```python + [0, 0]: id 1, weight 2.0 + [0, 1]: id 3, weight 0.5 + [1, 0]: id -1, weight 1.0 + [2, 3]: id 1, weight 3.0 + ``` + + `default_id` is 0. + + with `combiner`="mean", then the output will be a 3x20 matrix where + + ```python + output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) + output[1, :] = (params[0, :] * 1.0) / 1.0 + output[2, :] = (params[1, :] * 3.0) / 3.0 + ``` Raises: ValueError: if `embedding_weights` is empty. @@ -725,11 +790,11 @@ def safe_embedding_lookup_sparse(embedding_weights, along the last dimension. Args: - embedding_weights: A list of `P` float `Tensor`s or values representing - partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable` - created by partitioning along dimension 0. The total unpartitioned shape - should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size - and `e_1, ..., e_m` are the embedding dimensions. + embedding_weights: A single tensor representing the complete embedding + tensor, or a list of tensors all of same shape except for the first + dimension, representing sharded embedding tensors. Alternatively, a + `PartitionedVariable`, created by partitioning along dimension 0. Each + element must be appropriately sized for the given `partition_strategy`. sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the ids. `d_0` is typically batch size. sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing @@ -746,7 +811,41 @@ combining. Returns: - Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`. + A dense tensor representing the combined embeddings for the + sparse ids. For each row in the dense tensor represented by `sp_ids`, the op + looks up the embeddings for all ids in that row, multiplies them by the + corresponding weight, and combines these embeddings as specified.
+ + In other words, if + + `shape(combined embedding_weights) = [p0, p1, ..., pm]` + + and + + `shape(sparse_ids) = shape(sparse_weights) = [d0, d1, ..., dn]` + + then + + `shape(output) = [d0, d1, ... dn-1, p1, ..., pm]`. + + For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are + + ```python + [0, 0]: id 1, weight 2.0 + [0, 1]: id 3, weight 0.5 + [1, 0]: id -1, weight 1.0 + [2, 3]: id 1, weight 3.0 + ``` + + `default_id` is 0. + + with `combiner`="mean", then the output will be a 3x20 matrix where + + ```python + output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5) + output[1, :] = (params[0, :] * 1.0) / 1.0 + output[2, :] = (params[1, :] * 3.0) / 3.0 + ``` Raises: ValueError: if `embedding_weights` is empty. diff --git a/tensorflow/python/ops/parallel_for/control_flow_ops.py b/tensorflow/python/ops/parallel_for/control_flow_ops.py index 5339af538fd..a7649778161 100644 --- a/tensorflow/python/ops/parallel_for/control_flow_ops.py +++ b/tensorflow/python/ops/parallel_for/control_flow_ops.py @@ -195,7 +195,7 @@ def pfor(loop_fn, iters, fallback_to_while_loop=True, parallel_iterations=None): if functions_run_eagerly: logging.warning( "It looks like tf.function behavior was disabled, perhaps using " - "tf.config.experimental_run_functions_eagerly. Vectorization " + "tf.config.run_functions_eagerly. Vectorization " "primitives (e.g. tf.vectorized_map) require tf.function to work. 
" "These primitives will override the disable.") def_function.run_functions_eagerly(False) diff --git a/tensorflow/python/ops/structured/structured_tensor.py b/tensorflow/python/ops/structured/structured_tensor.py index a75364df659..6234e21d8fc 100644 --- a/tensorflow/python/ops/structured/structured_tensor.py +++ b/tensorflow/python/ops/structured/structured_tensor.py @@ -1089,7 +1089,7 @@ def _partition_outer_dimension(value, row_partition): nrows = row_partition.static_nrows ncols = row_partition.static_uniform_row_length shape = tensor_shape.TensorShape([nrows, ncols]).concatenate( - value.shape[2:]) + value.shape[1:]) fields = dict((k, _partition_outer_dimension(v, row_partition)) for (k, v) in value._fields.items()) return StructuredTensor( diff --git a/tensorflow/python/ops/structured/structured_tensor_test.py b/tensorflow/python/ops/structured/structured_tensor_test.py index e2d6a161641..0f2ac2c83e1 100644 --- a/tensorflow/python/ops/structured/structured_tensor_test.py +++ b/tensorflow/python/ops/structured/structured_tensor_test.py @@ -523,6 +523,16 @@ class StructuredTensorTest(test_util.TensorFlowTestCase, "x": tensor_spec.TensorSpec([2, 2], dtypes.int32), "y": ragged_tensor.RaggedTensorSpec([2, 2, None], dtypes.int32)}))) + def testPartitionOuterDimension3(self): + rt = ragged_tensor.RaggedTensor.from_value_rowids( + array_ops.constant([[1, 2], [3, 4], [5, 6]]), [0, 0, 1]) + struct = structured_tensor.StructuredTensor.from_fields({"r": rt}, [2]) + struct_2 = struct.partition_outer_dimension( + row_partition.RowPartition.from_row_splits([0, 1, 2])) + struct_3 = struct_2.partition_outer_dimension( + row_partition.RowPartition.from_row_splits([0, 1, 2])) + self.assertEqual(3, struct_3.rank) + def testPartitionOuterDimsErrors(self): st = StructuredTensor.from_fields({}) partition = row_partition.RowPartition.from_row_splits([0]) @@ -889,6 +899,18 @@ class StructuredTensorTest(test_util.TensorFlowTestCase, result = st.merge_dims(outer_axis, inner_axis) 
self.assertAllEqual(result, expected) + def testMergeDims_0_1(self): + rt = ragged_tensor.RaggedTensor.from_value_rowids( + array_ops.constant([[1, 2], [3, 4], [5, 6]]), [0, 0, 1]) + struct = StructuredTensor.from_fields({"r": rt}, [2]) + struct_2 = struct.partition_outer_dimension( + row_partition.RowPartition.from_row_splits([0, 1, 2])) + struct_3 = struct_2.partition_outer_dimension( + row_partition.RowPartition.from_row_splits([0, 1, 2])) + self.assertLen(struct_3.row_partitions, 2) + merged = struct_3.merge_dims(0, 1) + self.assertLen(merged.row_partitions, 1) + def testMergeDimsError(self): st = StructuredTensor.from_pyval([[[{"a": 5}]]]) with self.assertRaisesRegexp( diff --git a/tensorflow/python/saved_model/load.py b/tensorflow/python/saved_model/load.py index 16f1ebde82a..e0fbb7db270 100644 --- a/tensorflow/python/saved_model/load.py +++ b/tensorflow/python/saved_model/load.py @@ -324,7 +324,10 @@ class Loader(object): restore_ops = position.restore_ops() if restore_ops: if resource_variable_ops.is_resource_variable(obj): - obj._initializer_op = restore_ops + if len(restore_ops) == 1: + obj._initializer_op = restore_ops[0] + else: + obj._initializer_op = control_flow_ops.group(*restore_ops) elif isinstance(obj, lookup_ops.LookupInterface): # We don't need to check for eager execution here, since this code # path should only be taken if we are restoring in graph mode. 
diff --git a/tensorflow/python/saved_model/load_test.py b/tensorflow/python/saved_model/load_test.py index 0f79878cb6e..462f6f50f11 100644 --- a/tensorflow/python/saved_model/load_test.py +++ b/tensorflow/python/saved_model/load_test.py @@ -30,6 +30,7 @@ from absl.testing import parameterized from tensorflow.python.client import session as session_lib from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import backprop +from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import test from tensorflow.python.eager import wrap_function @@ -104,14 +105,24 @@ class LoadTest(test.TestCase, parameterized.TestCase): self.assertIs(imported.dep_three, imported.dep_two.dep) self.assertIsNot(imported.dep_one, imported.dep_two) + @test_util.run_in_graph_and_eager_modes def test_variables(self, cycles): root = tracking.AutoTrackable() root.v1 = variables.Variable(1., trainable=True) root.v2 = variables.Variable(2., trainable=False) - imported = cycle(root, cycles) - self.assertEqual(imported.v1.numpy(), 1.0) + self.evaluate([root.v1.initializer, root.v2.initializer]) + + for _ in range(cycles): + imported = cycle(root, 1) + self.evaluate([imported.v1.initializer, imported.v2.initializer]) + + if not context.executing_eagerly(): + self.assertIsInstance(imported.v1.initializer, ops.Operation) + self.assertIsInstance(imported.v2.initializer, ops.Operation) + + self.assertEqual(self.evaluate(imported.v1), 1.0) self.assertTrue(imported.v1.trainable) - self.assertEqual(imported.v2.numpy(), 2.0) + self.assertEqual(self.evaluate(imported.v2), 2.0) self.assertFalse(imported.v2.trainable) def test_variables_name(self, cycles): diff --git a/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py b/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py index 657659d580c..d4857677046 100644 --- a/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py +++ 
b/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py @@ -289,16 +289,17 @@ class CheckpointingTests(test.TestCase): functools.partial(model, input_value), global_step=root.optimizer_step) - for training_continuation in range(3): - strategy = mirrored_strategy.MirroredStrategy() - with strategy.scope(): + strategy = mirrored_strategy.MirroredStrategy() + with strategy.scope(): + for training_continuation in range(3): model = MyModel() optimizer = adam.AdamOptimizer(0.001) root = trackable_utils.Checkpoint( - optimizer=optimizer, model=model, + optimizer=optimizer, + model=model, optimizer_step=training_util.get_or_create_global_step()) - root.restore(checkpoint_management.latest_checkpoint( - checkpoint_directory)) + root.restore( + checkpoint_management.latest_checkpoint(checkpoint_directory)) for _ in range(num_training_steps): strategy.extended.call_for_each_replica( diff --git a/tensorflow/tools/api/golden/v1/tensorflow.config.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.config.pbtxt index 7876afae9a4..bf0c669cd93 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.config.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.config.pbtxt @@ -40,6 +40,10 @@ tf_module { name: "experimental_run_functions_eagerly" argspec: "args=[\'run_eagerly\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "functions_run_eagerly" + argspec: "args=[], varargs=None, keywords=None, defaults=None" + } member_method { name: "get_logical_device_configuration" argspec: "args=[\'device\'], varargs=None, keywords=None, defaults=None" @@ -60,6 +64,10 @@ tf_module { name: "list_physical_devices" argspec: "args=[\'device_type\'], varargs=None, keywords=None, defaults=[\'None\'], " } + member_method { + name: "run_functions_eagerly" + argspec: "args=[\'run_eagerly\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_logical_device_configuration" argspec: "args=[\'device\', \'logical_devices\'], varargs=None, 
keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.-model.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.-model.pbtxt index 791362e93c0..272396239d7 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.-model.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.-model.pbtxt @@ -139,7 +139,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.-sequential.pbtxt index fe05efe7534..8979491971f 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.-sequential.pbtxt @@ -144,7 +144,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-linear-model.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-linear-model.pbtxt index f8a6594d1e8..448ea60cc0f 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-linear-model.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-linear-model.pbtxt @@ -140,7 +140,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-peephole-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-peephole-l-s-t-m-cell.pbtxt index 3b12b4e8055..ee5e5b884a2 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-peephole-l-s-t-m-cell.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-peephole-l-s-t-m-cell.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-sequence-features.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-sequence-features.pbtxt index 578fbf03f77..a51aa88ae23 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-sequence-features.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-sequence-features.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-wide-deep-model.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-wide-deep-model.pbtxt index 40e58253bb4..8e1d9927434 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-wide-deep-model.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.experimental.-wide-deep-model.pbtxt @@ -140,7 +140,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: 
"args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-abstract-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-abstract-r-n-n-cell.pbtxt index 3ba96bab6fe..8c80da861f6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-abstract-r-n-n-cell.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-abstract-r-n-n-cell.pbtxt @@ -125,7 +125,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activation.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activation.pbtxt index 3f59d9987a5..ef8efd606c4 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activation.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activation.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activity-regularization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activity-regularization.pbtxt index acc72ebf939..60578d2cc59 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activity-regularization.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-activity-regularization.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, 
keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-add.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-add.pbtxt index 839d57e4c94..3bd1f2c7623 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-add.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-add.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-additive-attention.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-additive-attention.pbtxt index 1c22721666b..f6f8d3914b4 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-additive-attention.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-additive-attention.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-alpha-dropout.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-alpha-dropout.pbtxt index cf883e74088..3e408e96036 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-alpha-dropout.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-alpha-dropout.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, 
defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-attention.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-attention.pbtxt index 70800bccf8c..4197c1a88f6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-attention.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-attention.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling1-d.pbtxt index 11f70522f1a..153a801e1d8 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling2-d.pbtxt index ff311806b47..66e261111ae 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], 
varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling3-d.pbtxt index dc3cc76d9e1..b247490b067 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average-pooling3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average.pbtxt index 6fdcb8c9000..0f1808332c5 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-average.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool1-d.pbtxt index a5d912c9b8e..567143eb41d 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, 
keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool2-d.pbtxt index 7471b7306d3..56a2db85419 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool3-d.pbtxt index 323c0d51988..c0ab32fd7c2 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-avg-pool3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-batch-normalization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-batch-normalization.pbtxt index b5de4b0e7a0..5ff17a5b422 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-batch-normalization.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-batch-normalization.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, 
keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-bidirectional.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-bidirectional.pbtxt index 16143b3b20e..ccb55ec0c52 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-bidirectional.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-bidirectional.pbtxt @@ -122,7 +122,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-concatenate.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-concatenate.pbtxt index 2bea88de2fd..c44ff9e48e2 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-concatenate.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-concatenate.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt index 444220d4e06..43112cfe785 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt @@ -207,7 +207,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, 
keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d-transpose.pbtxt index 22de9fb79ff..3a592d713bc 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d.pbtxt index b45954626ba..cb2f7f03e56 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d-transpose.pbtxt index da6bfec7499..535243a2224 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, 
keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d.pbtxt index b66d4fc4d3c..9d847c759a1 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d-transpose.pbtxt index 4e9ce619361..afcc8822af6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d.pbtxt index fedb39dbd21..68cbf32998e 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-conv3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + 
argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d-transpose.pbtxt index 28357ffa0f6..d81e4546670 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d.pbtxt index 6d97faacece..76d66200fbc 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt index 830caf7f693..c1f49885d87 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: 
"args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d.pbtxt index df115f618c7..8d874ede685 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt index 69f71b6a3ff..5d1d6d04505 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d.pbtxt index f58aa3e1baa..f97c7617dbd 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-convolution3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } 
member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping1-d.pbtxt index 44b66135732..29c0cd34098 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping1-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping2-d.pbtxt index 63591c0e984..b1ecb7d1204 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping2-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping3-d.pbtxt index b5e96804759..1e0fe6e6cf8 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cropping3-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: 
"add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt index c11ee1eea4c..4a2a4d19048 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-g-r-u.pbtxt @@ -127,7 +127,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt index a2a805817fd..282d24fe9e7 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-cu-d-n-n-l-s-t-m.pbtxt @@ -127,7 +127,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense-features.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense-features.pbtxt index f816c00d9d5..ecda1603325 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense-features.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense-features.pbtxt @@ -118,7 +118,7 @@ tf_class { } 
member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense.pbtxt index 31b101ce81b..025c35eca17 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dense.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt index 46138e74b4b..d15459798cf 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dot.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dot.pbtxt index 4f45a085317..777248192c6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dot.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dot.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', 
\'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dropout.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dropout.pbtxt index 869d8d4817b..fe114648bff 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dropout.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-dropout.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-e-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-e-l-u.pbtxt index 33a95bd2312..19429711e80 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-e-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-e-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-embedding.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-embedding.pbtxt index 35c25eab279..5ac35db6734 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-embedding.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-embedding.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + 
argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-flatten.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-flatten.pbtxt index 955ec7a0a49..ff17ea72d45 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-flatten.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-flatten.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u-cell.pbtxt index 02dc67771b7..146333b09f1 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u-cell.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u-cell.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u.pbtxt index 939dde608aa..86fb73d68f7 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-g-r-u.pbtxt @@ -190,7 +190,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, 
defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-dropout.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-dropout.pbtxt index b966a1fa48a..8cfe9f9c692 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-dropout.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-dropout.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-noise.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-noise.pbtxt index bcadf04ab46..a64897f8849 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-noise.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-gaussian-noise.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt index 93f9f085028..7363d9d6521 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: 
"args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt index c1988faf3d7..58a08cd2d94 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt index 516e93110c5..1ec5624d8bf 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt index 545af759275..7931f0deb12 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - 
argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt index 13fc0dade36..6db66c8ba9e 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt index 5c6515f166d..ffd750c0522 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool1-d.pbtxt index 27bde045cbd..11762c021a9 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool1-d.pbtxt @@ -118,7 
+118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool2-d.pbtxt index 21ee43eb016..08043cb2926 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool3-d.pbtxt index 14fac4a4edd..f4155ac58aa 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pool3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt index 0cc18b9a462..6be5e6d4dea 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt +++ 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt index cb26f965881..69719674f2f 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt index aef01152cfe..624163caa84 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-layer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-layer.pbtxt index 6366a29f0b9..39e79f7980b 100644 --- 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-layer.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-input-layer.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt index a15b042d96e..4f88e672708 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m.pbtxt index 975df5f3b1e..6a799057db0 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-l-s-t-m.pbtxt @@ -190,7 +190,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-lambda.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-lambda.pbtxt index 14b809390eb..22fa730112f 100644 --- 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-lambda.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-lambda.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer-normalization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer-normalization.pbtxt index f1adf9b2178..d8e9445b8cb 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer-normalization.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer-normalization.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer.pbtxt index 2dcb55a3331..cdc76a45594 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-layer.pbtxt @@ -116,7 +116,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-leaky-re-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-leaky-re-l-u.pbtxt index 85b4a635d9e..06ffbc8fdf3 100644 --- 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-leaky-re-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-leaky-re-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt index bb4c63d4289..c2826298321 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected1-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt index 8068baf2931..da6934bae44 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-locally-connected2-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-masking.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-masking.pbtxt index 
775cc8f4458..205bf1ed369 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-masking.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-masking.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool1-d.pbtxt index 8fd7d059937..df8c2dcd736 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool2-d.pbtxt index aadaea15b7b..20a2d5162f4 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool3-d.pbtxt index ea1c60e48d3..0bddc075006 100644 --- 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pool3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling1-d.pbtxt index b9f09656973..ac7827999ef 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling2-d.pbtxt index ade1e839676..f3ae4bb7e5e 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling3-d.pbtxt index 2d129d415da..419b64d142f 100644 --- 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-max-pooling3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-maximum.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-maximum.pbtxt index b4adbbcbea2..6535a951a1e 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-maximum.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-maximum.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-minimum.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-minimum.pbtxt index 12d2cc690b8..d54b4d1bb60 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-minimum.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-minimum.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-multiply.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-multiply.pbtxt index 5e5d3992927..4dee52c2ac6 100644 --- 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-multiply.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-multiply.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-p-re-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-p-re-l-u.pbtxt index 733fb63d1fb..84025572e83 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-p-re-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-p-re-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-permute.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-permute.pbtxt index 3e2d70a5a0a..9483167cb23 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-permute.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-permute.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-r-n-n.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-r-n-n.pbtxt index 3018929154e..7143160bed9 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-r-n-n.pbtxt +++ 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-r-n-n.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-re-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-re-l-u.pbtxt index 7af41433d28..31b6b03c1b8 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-re-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-re-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-repeat-vector.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-repeat-vector.pbtxt index 52eb2c247cf..e5295928656 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-repeat-vector.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-repeat-vector.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-reshape.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-reshape.pbtxt index 08658b26be3..8b4773bc4f2 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-reshape.pbtxt +++ 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-reshape.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv1-d.pbtxt index 9bab5a78338..dae9b58bc55 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv1-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv2-d.pbtxt index 2bcc06f9330..53ee61ca723 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-conv2-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution1-d.pbtxt index 823e28a8bb9..28935f62922 100644 --- 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution1-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution2-d.pbtxt index c27047ecd71..8c00f85609f 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-separable-convolution2-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt index 417e79df321..068788d6b34 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n.pbtxt 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n.pbtxt index e6e12106c6c..ddb87d74337 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-simple-r-n-n.pbtxt @@ -178,7 +178,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-softmax.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-softmax.pbtxt index 8b435bd2b41..cc5165ea47a 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-softmax.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-softmax.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt index d5fbff4d5c6..9fff96d8764 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt index 287e0167076..24fbd03ee49 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt index 78ab93ae395..50ec54308c9 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt index 27afe1a56c6..9de71f557f6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt @@ -125,7 +125,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-subtract.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-subtract.pbtxt index b060c3169fd..05bbc5ad1be 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-subtract.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-subtract.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt index 272fd09afc6..2d34bf8754c 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-time-distributed.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-time-distributed.pbtxt index 95274944084..c153411811f 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-time-distributed.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-time-distributed.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff 
--git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling1-d.pbtxt index 8c8f4f287bd..07ca8e40761 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling1-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt index c56ea3122ed..f5b5f8bbf85 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling2-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling3-d.pbtxt index 80c647c9fc1..acfc1a33cfb 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-up-sampling3-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" 
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-wrapper.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-wrapper.pbtxt index 63423b9ee0c..58082daa2fa 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-wrapper.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-wrapper.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding1-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding1-d.pbtxt index e5a31b88df9..40e7a43ad53 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding1-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding1-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding2-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding2-d.pbtxt index b170d030fe8..473feb798f3 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding2-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding2-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding3-d.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding3-d.pbtxt index 6010e155661..ab8fca29714 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding3-d.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.-zero-padding3-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.-random-fourier-features.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.-random-fourier-features.pbtxt index b0458f8e8f1..cadf62e0d37 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.-random-fourier-features.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.-random-fourier-features.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt index 4a846b138a9..5640a4d1dcd 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', 
\'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt index 9feb216577a..88a89805ec9 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt @@ -125,7 +125,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt index d84d810bdd0..305b239c3e6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt index 
44da86f424c..3fc5402fb39 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt index 29a82a1eab0..250880c9ae8 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt index 6dc385a78e2..39cd6af00a0 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], 
varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt index 52cec4506fa..ce654bd1537 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt index 7096d9456d5..95c9cb2dd73 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt index 7e4089fd9e7..92dfa72a7a5 100644 --- 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt index 0d982f20e55..88108bfe9aa 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt index 7816930fd5c..7036fb926a8 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, 
defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt index 05f110140bf..5313dfe9907 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt index ab5067a23ca..47852865558 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt @@ -125,7 +125,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.pbtxt index 10dbfb56078..ea139297807 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.pbtxt @@ -448,10 +448,18 @@ tf_module { name: "deserialize" argspec: "args=[\'config\', 
\'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], " } + member_method { + name: "disable_v2_dtype_behavior" + argspec: "args=[], varargs=None, keywords=None, defaults=None" + } member_method { name: "dot" argspec: "args=[\'inputs\', \'axes\', \'normalize\'], varargs=None, keywords=kwargs, defaults=[\'False\'], " } + member_method { + name: "enable_v2_dtype_behavior" + argspec: "args=[], varargs=None, keywords=None, defaults=None" + } member_method { name: "maximum" argspec: "args=[\'inputs\'], varargs=None, keywords=kwargs, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-a-u-c.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-a-u-c.pbtxt index 5e263f72a03..56704ace966 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-a-u-c.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-a-u-c.pbtxt @@ -122,7 +122,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-accuracy.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-accuracy.pbtxt index d211a16597e..fb970c23732 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-binary-accuracy.pbtxt 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-binary-accuracy.pbtxt index 58103637fe3..09863e42eb1 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-binary-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-binary-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-binary-crossentropy.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-binary-crossentropy.pbtxt index 4f748914101..eb033ce30a5 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-binary-crossentropy.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-binary-crossentropy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-accuracy.pbtxt index 42e57f86769..9de555e3427 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-crossentropy.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-crossentropy.pbtxt index 6ef136de517..fa41859b37e 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-crossentropy.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-crossentropy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-hinge.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-hinge.pbtxt index f3379748fb9..3ebaddb0e58 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-hinge.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-categorical-hinge.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-cosine-similarity.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-cosine-similarity.pbtxt index 9367edcb228..7d8eafeb393 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-cosine-similarity.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-cosine-similarity.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, 
keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-false-negatives.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-false-negatives.pbtxt index 820d2ed1e7c..3fa0db2af91 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-false-negatives.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-false-negatives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-false-positives.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-false-positives.pbtxt index de23747dcef..4e2a380445e 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-false-positives.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-false-positives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-hinge.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-hinge.pbtxt index edae2d27448..66e416d57f6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-hinge.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-hinge.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, 
keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-k-l-divergence.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-k-l-divergence.pbtxt index 171ade560a9..6fbbe6b9336 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-k-l-divergence.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-k-l-divergence.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-log-cosh-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-log-cosh-error.pbtxt index 8713d9aa427..f7f8f79eb17 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-log-cosh-error.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-log-cosh-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-absolute-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-absolute-error.pbtxt index 6c5541e71d9..adaf33d3608 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-absolute-error.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-absolute-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: 
"args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-absolute-percentage-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-absolute-percentage-error.pbtxt index 45b94842278..2f743849a8b 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-absolute-percentage-error.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-absolute-percentage-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-io-u.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-io-u.pbtxt index 90733200606..26fe404372d 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-io-u.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-io-u.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-relative-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-relative-error.pbtxt index 5997066e37a..1d3eae22f8c 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-relative-error.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-relative-error.pbtxt @@ -120,7 +120,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', 
\'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-squared-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-squared-error.pbtxt index b6a0e00ffa0..3fe23a73576 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-squared-error.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-squared-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-squared-logarithmic-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-squared-logarithmic-error.pbtxt index b698ab5ff65..2b98c31a6c7 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-squared-logarithmic-error.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-squared-logarithmic-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-tensor.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-tensor.pbtxt index 01a3d3f6e07..772bf62a923 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-tensor.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean-tensor.pbtxt @@ -126,7 
+126,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean.pbtxt index c47a1bc749c..bc14d53dbee 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-mean.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-metric.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-metric.pbtxt index fe72d0ad1d6..8a6977835a0 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-metric.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-metric.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-poisson.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-poisson.pbtxt index befbf09ed11..ff7fdbb6382 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-poisson.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-poisson.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', 
\'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-precision-at-recall.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-precision-at-recall.pbtxt index 3f001a9d4e2..81d8a8d94d6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-precision-at-recall.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-precision-at-recall.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-precision.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-precision.pbtxt index e4d66868b1b..7a2dc1f7eaf 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-precision.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-precision.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-recall-at-precision.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-recall-at-precision.pbtxt index daad023ef66..f57b210b9e8 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-recall-at-precision.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-recall-at-precision.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: 
"add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-recall.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-recall.pbtxt index 40329ff21be..e62bad28d0c 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-recall.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-recall.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-root-mean-squared-error.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-root-mean-squared-error.pbtxt index a05adf6070a..3ee6eb4995e 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-root-mean-squared-error.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-root-mean-squared-error.pbtxt @@ -120,7 +120,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sensitivity-at-specificity.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sensitivity-at-specificity.pbtxt index 466ef391017..ddfadbdc66f 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sensitivity-at-specificity.pbtxt +++ 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sensitivity-at-specificity.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-categorical-accuracy.pbtxt index 12a1f5daa14..f9c77f7a8a2 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-categorical-crossentropy.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-categorical-crossentropy.pbtxt index 86ae28fb876..7e1abedfebb 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-categorical-crossentropy.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-categorical-crossentropy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-top-k-categorical-accuracy.pbtxt 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-top-k-categorical-accuracy.pbtxt index 6a52c10edbb..4a222c840a1 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-top-k-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sparse-top-k-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-specificity-at-sensitivity.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-specificity-at-sensitivity.pbtxt index 08a9118eac0..ce55374bd73 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-specificity-at-sensitivity.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-specificity-at-sensitivity.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-squared-hinge.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-squared-hinge.pbtxt index 810f4e61806..a9192f88606 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-squared-hinge.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-squared-hinge.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } 
member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sum.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sum.pbtxt index 9ee1af61e34..a0def9553f5 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sum.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-sum.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-top-k-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-top-k-categorical-accuracy.pbtxt index 41dcc25644f..24511bd678c 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-top-k-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-top-k-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-true-negatives.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-true-negatives.pbtxt index 3726bff3850..b840940d24a 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-true-negatives.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-true-negatives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, 
keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-true-positives.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-true-positives.pbtxt index 1ca4fc5c21b..87f6a87de98 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-true-positives.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.metrics.-true-positives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-model.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-model.pbtxt index 5acd7f4370b..13c3416fc0c 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-model.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-model.pbtxt @@ -139,7 +139,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-sequential.pbtxt index b9d9b116ea7..9218cbea99e 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.models.-sequential.pbtxt @@ -144,7 +144,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } 
member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.raw_ops.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.raw_ops.pbtxt index 1f050e933ed..cf6b807502c 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.raw_ops.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.raw_ops.pbtxt @@ -3898,7 +3898,11 @@ tf_module { } member_method { name: "ShuffleAndRepeatDataset" - argspec: "args=[\'input_dataset\', \'buffer_size\', \'seed\', \'seed2\', \'count\', \'output_types\', \'output_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'input_dataset\', \'buffer_size\', \'seed\', \'seed2\', \'count\', \'output_types\', \'output_shapes\', \'reshuffle_each_iteration\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], " + } + member_method { + name: "ShuffleAndRepeatDatasetV2" + argspec: "args=[\'input_dataset\', \'buffer_size\', \'seed\', \'seed2\', \'count\', \'seed_generator\', \'output_types\', \'output_shapes\', \'reshuffle_each_iteration\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], " } member_method { name: "ShuffleDataset" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.config.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.config.pbtxt index 7876afae9a4..bf0c669cd93 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.config.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.config.pbtxt @@ -40,6 +40,10 @@ tf_module { name: "experimental_run_functions_eagerly" argspec: "args=[\'run_eagerly\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "functions_run_eagerly" + argspec: "args=[], varargs=None, keywords=None, defaults=None" + } member_method { name: "get_logical_device_configuration" argspec: "args=[\'device\'], varargs=None, keywords=None, defaults=None" @@ -60,6 +64,10 @@ tf_module { name: "list_physical_devices" argspec: "args=[\'device_type\'], varargs=None, keywords=None, defaults=[\'None\'], 
" } + member_method { + name: "run_functions_eagerly" + argspec: "args=[\'run_eagerly\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_logical_device_configuration" argspec: "args=[\'device\', \'logical_devices\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.-model.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.-model.pbtxt index 791362e93c0..272396239d7 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.-model.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.-model.pbtxt @@ -139,7 +139,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.-sequential.pbtxt index fe05efe7534..8979491971f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.-sequential.pbtxt @@ -144,7 +144,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-linear-model.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-linear-model.pbtxt index f8a6594d1e8..448ea60cc0f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-linear-model.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-linear-model.pbtxt @@ -140,7 +140,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', 
\'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-peephole-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-peephole-l-s-t-m-cell.pbtxt index 3b12b4e8055..ee5e5b884a2 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-peephole-l-s-t-m-cell.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-peephole-l-s-t-m-cell.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-sequence-features.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-sequence-features.pbtxt index 578fbf03f77..a51aa88ae23 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-sequence-features.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-sequence-features.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-wide-deep-model.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-wide-deep-model.pbtxt index 40e58253bb4..8e1d9927434 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-wide-deep-model.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.experimental.-wide-deep-model.pbtxt @@ -140,7 +140,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-abstract-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-abstract-r-n-n-cell.pbtxt index 3ba96bab6fe..8c80da861f6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-abstract-r-n-n-cell.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-abstract-r-n-n-cell.pbtxt @@ -125,7 +125,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activation.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activation.pbtxt index 3f59d9987a5..ef8efd606c4 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activation.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activation.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activity-regularization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activity-regularization.pbtxt index acc72ebf939..60578d2cc59 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activity-regularization.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-activity-regularization.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-add.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-add.pbtxt index 839d57e4c94..3bd1f2c7623 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-add.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-add.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-additive-attention.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-additive-attention.pbtxt index 1c22721666b..f6f8d3914b4 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-additive-attention.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-additive-attention.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-alpha-dropout.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-alpha-dropout.pbtxt index cf883e74088..3e408e96036 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-alpha-dropout.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-alpha-dropout.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-attention.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-attention.pbtxt index 70800bccf8c..4197c1a88f6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-attention.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-attention.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling1-d.pbtxt index 11f70522f1a..153a801e1d8 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling2-d.pbtxt index ff311806b47..66e261111ae 
100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling3-d.pbtxt index dc3cc76d9e1..b247490b067 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average-pooling3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average.pbtxt index 6fdcb8c9000..0f1808332c5 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-average.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool1-d.pbtxt index a5d912c9b8e..567143eb41d 
100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool2-d.pbtxt index 7471b7306d3..56a2db85419 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool3-d.pbtxt index 323c0d51988..c0ab32fd7c2 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-avg-pool3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-batch-normalization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-batch-normalization.pbtxt index 71ca168a55c..6b1c609774e 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-batch-normalization.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-batch-normalization.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-bidirectional.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-bidirectional.pbtxt index 16143b3b20e..ccb55ec0c52 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-bidirectional.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-bidirectional.pbtxt @@ -122,7 +122,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-concatenate.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-concatenate.pbtxt index 2bea88de2fd..c44ff9e48e2 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-concatenate.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-concatenate.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt index 444220d4e06..43112cfe785 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt @@ -207,7 +207,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d-transpose.pbtxt index 22de9fb79ff..3a592d713bc 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d.pbtxt index b45954626ba..cb2f7f03e56 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d-transpose.pbtxt index da6bfec7499..535243a2224 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d.pbtxt index b66d4fc4d3c..9d847c759a1 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d-transpose.pbtxt index 4e9ce619361..afcc8822af6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d.pbtxt index fedb39dbd21..68cbf32998e 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-conv3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d-transpose.pbtxt index 28357ffa0f6..d81e4546670 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d.pbtxt index 6d97faacece..76d66200fbc 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt index 830caf7f693..c1f49885d87 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d.pbtxt index df115f618c7..8d874ede685 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt index 69f71b6a3ff..5d1d6d04505 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d-transpose.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d.pbtxt index f58aa3e1baa..f97c7617dbd 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-convolution3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping1-d.pbtxt index 44b66135732..29c0cd34098 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping1-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping2-d.pbtxt index 63591c0e984..b1ecb7d1204 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping2-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping3-d.pbtxt index b5e96804759..1e0fe6e6cf8 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-cropping3-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense-features.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense-features.pbtxt index d035db30248..f7137f0d09b 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense-features.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense-features.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense.pbtxt index 31b101ce81b..025c35eca17 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dense.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt index 46138e74b4b..d15459798cf 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-depthwise-conv2-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dot.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dot.pbtxt index 4f45a085317..777248192c6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dot.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dot.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dropout.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dropout.pbtxt index 869d8d4817b..fe114648bff 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dropout.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-dropout.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-e-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-e-l-u.pbtxt index 33a95bd2312..19429711e80 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-e-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-e-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-embedding.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-embedding.pbtxt index 35c25eab279..5ac35db6734 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-embedding.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-embedding.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-flatten.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-flatten.pbtxt index 955ec7a0a49..ff17ea72d45 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-flatten.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-flatten.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u-cell.pbtxt 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u-cell.pbtxt index 0bbca8b0628..df2fa8a2f5d 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u-cell.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u-cell.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u.pbtxt index 8365c652b9d..24510d6a2bd 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-g-r-u.pbtxt @@ -192,7 +192,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-dropout.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-dropout.pbtxt index b966a1fa48a..8cfe9f9c692 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-dropout.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-dropout.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-noise.pbtxt 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-noise.pbtxt index bcadf04ab46..a64897f8849 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-noise.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-gaussian-noise.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt index 93f9f085028..7363d9d6521 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt index c1988faf3d7..58a08cd2d94 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: 
"add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt index 516e93110c5..1ec5624d8bf 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-average-pooling3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt index 545af759275..7931f0deb12 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt index 13fc0dade36..6db66c8ba9e 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', 
\'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt index 5c6515f166d..ffd750c0522 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-avg-pool3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool1-d.pbtxt index 27bde045cbd..11762c021a9 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool2-d.pbtxt index 21ee43eb016..08043cb2926 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, 
keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool3-d.pbtxt index 14fac4a4edd..f4155ac58aa 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pool3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt index 0cc18b9a462..6be5e6d4dea 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt index cb26f965881..69719674f2f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: 
"add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt index aef01152cfe..624163caa84 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-global-max-pooling3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-layer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-layer.pbtxt index 6366a29f0b9..39e79f7980b 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-layer.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-input-layer.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt index a0deeb6dbd3..428b078e9d8 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m-cell.pbtxt @@ -119,7 +119,7 @@ tf_class { } 
member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m.pbtxt index e000180ee73..0ddf19fcde9 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-l-s-t-m.pbtxt @@ -192,7 +192,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-lambda.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-lambda.pbtxt index 14b809390eb..22fa730112f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-lambda.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-lambda.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer-normalization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer-normalization.pbtxt index f1adf9b2178..d8e9445b8cb 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer-normalization.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer-normalization.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - 
argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer.pbtxt index 2dcb55a3331..cdc76a45594 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-layer.pbtxt @@ -116,7 +116,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-leaky-re-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-leaky-re-l-u.pbtxt index 85b4a635d9e..06ffbc8fdf3 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-leaky-re-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-leaky-re-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected1-d.pbtxt index bb4c63d4289..c2826298321 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected1-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: 
"args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected2-d.pbtxt index 8068baf2931..da6934bae44 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-locally-connected2-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-masking.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-masking.pbtxt index 775cc8f4458..205bf1ed369 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-masking.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-masking.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool1-d.pbtxt index 8fd7d059937..df8c2dcd736 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: 
"args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool2-d.pbtxt index aadaea15b7b..20a2d5162f4 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool3-d.pbtxt index ea1c60e48d3..0bddc075006 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pool3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling1-d.pbtxt index b9f09656973..ac7827999ef 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', 
\'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling2-d.pbtxt index ade1e839676..f3ae4bb7e5e 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling3-d.pbtxt index 2d129d415da..419b64d142f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-max-pooling3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-maximum.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-maximum.pbtxt index b4adbbcbea2..6535a951a1e 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-maximum.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-maximum.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', 
\'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-minimum.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-minimum.pbtxt index 12d2cc690b8..d54b4d1bb60 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-minimum.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-minimum.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-multiply.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-multiply.pbtxt index 5e5d3992927..4dee52c2ac6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-multiply.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-multiply.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-p-re-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-p-re-l-u.pbtxt index 733fb63d1fb..84025572e83 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-p-re-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-p-re-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + 
argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-permute.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-permute.pbtxt index 3e2d70a5a0a..9483167cb23 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-permute.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-permute.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-r-n-n.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-r-n-n.pbtxt index 3018929154e..7143160bed9 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-r-n-n.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-r-n-n.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-re-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-re-l-u.pbtxt index 7af41433d28..31b6b03c1b8 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-re-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-re-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } 
member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-repeat-vector.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-repeat-vector.pbtxt index 52eb2c247cf..e5295928656 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-repeat-vector.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-repeat-vector.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-reshape.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-reshape.pbtxt index 08658b26be3..8b4773bc4f2 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-reshape.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-reshape.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv1-d.pbtxt index 9bab5a78338..dae9b58bc55 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv1-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { 
name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv2-d.pbtxt index 2bcc06f9330..53ee61ca723 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-conv2-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution1-d.pbtxt index 823e28a8bb9..28935f62922 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution1-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution2-d.pbtxt index c27047ecd71..8c00f85609f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-separable-convolution2-d.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + 
argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt index 417e79df321..068788d6b34 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n-cell.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n.pbtxt index e6e12106c6c..ddb87d74337 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-simple-r-n-n.pbtxt @@ -178,7 +178,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-softmax.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-softmax.pbtxt index 8b435bd2b41..cc5165ea47a 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-softmax.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-softmax.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: 
"args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt index d5fbff4d5c6..9fff96d8764 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout1-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt index 287e0167076..24fbd03ee49 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout2-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt index 78ab93ae395..50ec54308c9 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-spatial-dropout3-d.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], 
varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt index 27afe1a56c6..9de71f557f6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-stacked-r-n-n-cells.pbtxt @@ -125,7 +125,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-subtract.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-subtract.pbtxt index b060c3169fd..05bbc5ad1be 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-subtract.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-subtract.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt index 272fd09afc6..2d34bf8754c 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-thresholded-re-l-u.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', 
\'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-time-distributed.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-time-distributed.pbtxt index 95274944084..c153411811f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-time-distributed.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-time-distributed.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling1-d.pbtxt index 8c8f4f287bd..07ca8e40761 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling1-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt index c56ea3122ed..f5b5f8bbf85 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling2-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: 
"args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling3-d.pbtxt index 80c647c9fc1..acfc1a33cfb 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-up-sampling3-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-wrapper.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-wrapper.pbtxt index 63423b9ee0c..58082daa2fa 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-wrapper.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-wrapper.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding1-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding1-d.pbtxt index e5a31b88df9..40e7a43ad53 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding1-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding1-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', 
\'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding2-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding2-d.pbtxt index b170d030fe8..473feb798f3 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding2-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding2-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding3-d.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding3-d.pbtxt index 6010e155661..ab8fca29714 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding3-d.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.-zero-padding3-d.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.-random-fourier-features.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.-random-fourier-features.pbtxt index b0458f8e8f1..cadf62e0d37 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.-random-fourier-features.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.-random-fourier-features.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.-sync-batch-normalization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.-sync-batch-normalization.pbtxt index e3a91f6791b..a589ffff174 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.-sync-batch-normalization.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.-sync-batch-normalization.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt index 4a846b138a9..5640a4d1dcd 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt index ea54293bca1..d52fffa12a3 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt @@ -123,7 +123,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt index d84d810bdd0..305b239c3e6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt index 44da86f424c..3fc5402fb39 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt index 29a82a1eab0..250880c9ae8 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt index 6dc385a78e2..39cd6af00a0 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt index 52cec4506fa..ce654bd1537 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt index 7096d9456d5..95c9cb2dd73 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt index 7e4089fd9e7..92dfa72a7a5 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt index 0d982f20e55..88108bfe9aa 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt index 7816930fd5c..7036fb926a8 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt index 05f110140bf..5313dfe9907 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt index 025b1a013cb..05154268354 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt @@ -123,7 +123,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-a-u-c.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-a-u-c.pbtxt index 5e263f72a03..56704ace966 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-a-u-c.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-a-u-c.pbtxt @@ -122,7 +122,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], 
varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-accuracy.pbtxt index d211a16597e..fb970c23732 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-binary-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-binary-accuracy.pbtxt index 58103637fe3..09863e42eb1 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-binary-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-binary-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-binary-crossentropy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-binary-crossentropy.pbtxt index 4f748914101..eb033ce30a5 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-binary-crossentropy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-binary-crossentropy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', 
\'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-accuracy.pbtxt index 42e57f86769..9de555e3427 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-crossentropy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-crossentropy.pbtxt index 6ef136de517..fa41859b37e 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-crossentropy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-crossentropy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-hinge.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-hinge.pbtxt index f3379748fb9..3ebaddb0e58 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-hinge.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-categorical-hinge.pbtxt 
@@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-cosine-similarity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-cosine-similarity.pbtxt index 9367edcb228..7d8eafeb393 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-cosine-similarity.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-cosine-similarity.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-false-negatives.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-false-negatives.pbtxt index 820d2ed1e7c..3fa0db2af91 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-false-negatives.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-false-negatives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-false-positives.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-false-positives.pbtxt index de23747dcef..4e2a380445e 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-false-positives.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-false-positives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-hinge.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-hinge.pbtxt index edae2d27448..66e416d57f6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-hinge.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-hinge.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-k-l-divergence.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-k-l-divergence.pbtxt index 171ade560a9..6fbbe6b9336 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-k-l-divergence.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-k-l-divergence.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-log-cosh-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-log-cosh-error.pbtxt index 8713d9aa427..f7f8f79eb17 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-log-cosh-error.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-log-cosh-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-absolute-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-absolute-error.pbtxt index 6c5541e71d9..adaf33d3608 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-absolute-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-absolute-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-absolute-percentage-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-absolute-percentage-error.pbtxt index 45b94842278..2f743849a8b 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-absolute-percentage-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-absolute-percentage-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-io-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-io-u.pbtxt index 90733200606..26fe404372d 
100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-io-u.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-io-u.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-relative-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-relative-error.pbtxt index 5997066e37a..1d3eae22f8c 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-relative-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-relative-error.pbtxt @@ -120,7 +120,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-squared-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-squared-error.pbtxt index b6a0e00ffa0..3fe23a73576 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-squared-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-squared-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-squared-logarithmic-error.pbtxt 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-squared-logarithmic-error.pbtxt index b698ab5ff65..2b98c31a6c7 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-squared-logarithmic-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-squared-logarithmic-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-tensor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-tensor.pbtxt index 01a3d3f6e07..772bf62a923 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-tensor.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean-tensor.pbtxt @@ -126,7 +126,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean.pbtxt index c47a1bc749c..bc14d53dbee 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-mean.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-metric.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-metric.pbtxt index fe72d0ad1d6..8a6977835a0 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-metric.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-metric.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-poisson.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-poisson.pbtxt index befbf09ed11..ff7fdbb6382 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-poisson.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-poisson.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-precision-at-recall.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-precision-at-recall.pbtxt index 3f001a9d4e2..81d8a8d94d6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-precision-at-recall.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-precision-at-recall.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-precision.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-precision.pbtxt index e4d66868b1b..7a2dc1f7eaf 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-precision.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-precision.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-recall-at-precision.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-recall-at-precision.pbtxt index daad023ef66..f57b210b9e8 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-recall-at-precision.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-recall-at-precision.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-recall.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-recall.pbtxt index 40329ff21be..e62bad28d0c 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-recall.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-recall.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-root-mean-squared-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-root-mean-squared-error.pbtxt index a05adf6070a..3ee6eb4995e 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-root-mean-squared-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-root-mean-squared-error.pbtxt @@ -120,7 +120,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sensitivity-at-specificity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sensitivity-at-specificity.pbtxt index 466ef391017..ddfadbdc66f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sensitivity-at-specificity.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sensitivity-at-specificity.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-categorical-accuracy.pbtxt index 12a1f5daa14..f9c77f7a8a2 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, 
defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-categorical-crossentropy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-categorical-crossentropy.pbtxt index 86ae28fb876..7e1abedfebb 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-categorical-crossentropy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-categorical-crossentropy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-top-k-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-top-k-categorical-accuracy.pbtxt index 6a52c10edbb..4a222c840a1 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-top-k-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sparse-top-k-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-specificity-at-sensitivity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-specificity-at-sensitivity.pbtxt index 08a9118eac0..ce55374bd73 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-specificity-at-sensitivity.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-specificity-at-sensitivity.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-squared-hinge.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-squared-hinge.pbtxt index 810f4e61806..a9192f88606 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-squared-hinge.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-squared-hinge.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sum.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sum.pbtxt index 9ee1af61e34..a0def9553f5 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sum.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-sum.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-top-k-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-top-k-categorical-accuracy.pbtxt index 41dcc25644f..24511bd678c 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-top-k-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-top-k-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-true-negatives.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-true-negatives.pbtxt index 3726bff3850..b840940d24a 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-true-negatives.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-true-negatives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-true-positives.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-true-positives.pbtxt index 1ca4fc5c21b..87f6a87de98 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-true-positives.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.metrics.-true-positives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-model.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-model.pbtxt index 
5acd7f4370b..13c3416fc0c 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-model.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-model.pbtxt @@ -139,7 +139,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-sequential.pbtxt index b9d9b116ea7..9218cbea99e 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.models.-sequential.pbtxt @@ -144,7 +144,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-a-u-c.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-a-u-c.pbtxt index 45e32e8e736..03451eb1ae7 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-a-u-c.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-a-u-c.pbtxt @@ -122,7 +122,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-accuracy.pbtxt index 021fe16877a..7011c6d74fa 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-accuracy.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-binary-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-binary-accuracy.pbtxt index c87a817fd32..1259bbab1eb 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-binary-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-binary-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-binary-crossentropy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-binary-crossentropy.pbtxt index 03480e9b8c9..edceee32ad0 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-binary-crossentropy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-binary-crossentropy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-accuracy.pbtxt index fb635300604..442e48571cb 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-accuracy.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-crossentropy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-crossentropy.pbtxt index 155ab36818a..ac5beaf47db 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-crossentropy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-crossentropy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-hinge.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-hinge.pbtxt index f5fb6f79099..0999ad8ba56 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-hinge.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-categorical-hinge.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-cosine-similarity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-cosine-similarity.pbtxt index 8c8e76c2f3e..a77ac482bdb 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-cosine-similarity.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-cosine-similarity.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-false-negatives.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-false-negatives.pbtxt index 1427375ea4d..171820ce02d 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-false-negatives.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-false-negatives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-false-positives.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-false-positives.pbtxt index 06095a48cab..2ae474f4faa 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-false-positives.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-false-positives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-hinge.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-hinge.pbtxt index 623f74d34fc..daddc6e44da 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-hinge.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-hinge.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-k-l-divergence.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-k-l-divergence.pbtxt index 6bf1092aa68..4b1e4bfb92c 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-k-l-divergence.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-k-l-divergence.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-log-cosh-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-log-cosh-error.pbtxt index 7812d8715b5..e37629b0dc6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-log-cosh-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-log-cosh-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-absolute-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-absolute-error.pbtxt index 0719052ccab..c2267f83969 100644 --- 
a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-absolute-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-absolute-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-absolute-percentage-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-absolute-percentage-error.pbtxt index c644e71f8a1..c7f57ed5244 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-absolute-percentage-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-absolute-percentage-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-io-u.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-io-u.pbtxt index 102f4715c7e..32b6b4e9fad 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-io-u.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-io-u.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-relative-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-relative-error.pbtxt index c4c5608c4c0..23e79579eb8 
100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-relative-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-relative-error.pbtxt @@ -120,7 +120,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-squared-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-squared-error.pbtxt index 6847db69ead..c924ffb55b9 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-squared-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-squared-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-squared-logarithmic-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-squared-logarithmic-error.pbtxt index fe8c55b9465..f3d29557a17 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-squared-logarithmic-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-squared-logarithmic-error.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-tensor.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-tensor.pbtxt 
index e43e5265258..2a22c1a5b63 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-tensor.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean-tensor.pbtxt @@ -126,7 +126,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean.pbtxt index 943b3029985..94fb1937b76 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-mean.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-metric.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-metric.pbtxt index 2f9d17d37e9..f3bb587cca5 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-metric.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-metric.pbtxt @@ -117,7 +117,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-poisson.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-poisson.pbtxt index b583863298c..0fa457f3553 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-poisson.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-poisson.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-precision-at-recall.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-precision-at-recall.pbtxt index ddd3954ed18..a26305e2d0f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-precision-at-recall.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-precision-at-recall.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-precision.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-precision.pbtxt index 9d74d0fbc70..73743ba32a0 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-precision.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-precision.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-recall-at-precision.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-recall-at-precision.pbtxt index 7ad458bc2db..87f38a2d95d 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-recall-at-precision.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-recall-at-precision.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-recall.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-recall.pbtxt index c6f4672ff20..4e339c3f772 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-recall.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-recall.pbtxt @@ -118,7 +118,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-root-mean-squared-error.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-root-mean-squared-error.pbtxt index 97ba4531f73..0f57806d0f4 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-root-mean-squared-error.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-root-mean-squared-error.pbtxt @@ -120,7 +120,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sensitivity-at-specificity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sensitivity-at-specificity.pbtxt index b29b4cdfa54..e82f86f28e7 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sensitivity-at-specificity.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sensitivity-at-specificity.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-categorical-accuracy.pbtxt index 408743cb054..60f2e2e89b5 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-categorical-crossentropy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-categorical-crossentropy.pbtxt index ebb08e7efc1..a24216cd16e 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-categorical-crossentropy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-categorical-crossentropy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-top-k-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-top-k-categorical-accuracy.pbtxt index 
4c8925cd1c9..e4139b23999 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-top-k-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sparse-top-k-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-specificity-at-sensitivity.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-specificity-at-sensitivity.pbtxt index c714447e72a..cf55bba0ee9 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-specificity-at-sensitivity.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-specificity-at-sensitivity.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-squared-hinge.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-squared-hinge.pbtxt index ca6a07356ca..997363b92bc 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-squared-hinge.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-squared-hinge.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sum.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sum.pbtxt 
index b5985644e73..2a3c2a398eb 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sum.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-sum.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-top-k-categorical-accuracy.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-top-k-categorical-accuracy.pbtxt index 99b712beb94..006bbc68a40 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-top-k-categorical-accuracy.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-top-k-categorical-accuracy.pbtxt @@ -121,7 +121,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-true-negatives.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-true-negatives.pbtxt index 2c31194e622..5550520f3c7 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-true-negatives.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-true-negatives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-true-positives.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-true-positives.pbtxt index 
d2567e9050d..06892683ac3 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.metrics.-true-positives.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.metrics.-true-positives.pbtxt @@ -119,7 +119,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-device-wrapper.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-device-wrapper.pbtxt index 7b08c50d6f9..cdcf7f23edc 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-device-wrapper.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-device-wrapper.pbtxt @@ -128,7 +128,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-dropout-wrapper.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-dropout-wrapper.pbtxt index 7fc672d250e..75e827a8ab9 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-dropout-wrapper.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-dropout-wrapper.pbtxt @@ -132,7 +132,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-residual-wrapper.pbtxt 
b/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-residual-wrapper.pbtxt index f7cbc16a57d..faf951f2153 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-residual-wrapper.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.nn.-r-n-n-cell-residual-wrapper.pbtxt @@ -128,7 +128,7 @@ tf_class { } member_method { name: "add_loss" - argspec: "args=[\'self\', \'losses\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" } member_method { name: "add_metric" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.raw_ops.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.raw_ops.pbtxt index 1f050e933ed..cf6b807502c 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.raw_ops.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.raw_ops.pbtxt @@ -3898,7 +3898,11 @@ tf_module { } member_method { name: "ShuffleAndRepeatDataset" - argspec: "args=[\'input_dataset\', \'buffer_size\', \'seed\', \'seed2\', \'count\', \'output_types\', \'output_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], " + argspec: "args=[\'input_dataset\', \'buffer_size\', \'seed\', \'seed2\', \'count\', \'output_types\', \'output_shapes\', \'reshuffle_each_iteration\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], " + } + member_method { + name: "ShuffleAndRepeatDatasetV2" + argspec: "args=[\'input_dataset\', \'buffer_size\', \'seed\', \'seed2\', \'count\', \'seed_generator\', \'output_types\', \'output_shapes\', \'reshuffle_each_iteration\', \'name\'], varargs=None, keywords=None, defaults=[\'True\', \'None\'], " } member_method { name: "ShuffleDataset" diff --git a/tensorflow/tools/compatibility/renames_v2.py b/tensorflow/tools/compatibility/renames_v2.py index 1a0afb6c804..8bccf4f0487 100644 --- a/tensorflow/tools/compatibility/renames_v2.py +++ b/tensorflow/tools/compatibility/renames_v2.py @@ -524,6 +524,10 @@ renames = { 
'tf.compat.v1.keras.layers.CuDNNGRU', 'tf.keras.layers.CuDNNLSTM': 'tf.compat.v1.keras.layers.CuDNNLSTM', + 'tf.keras.layers.disable_v2_dtype_behavior': + 'tf.compat.v1.keras.layers.disable_v2_dtype_behavior', + 'tf.keras.layers.enable_v2_dtype_behavior': + 'tf.compat.v1.keras.layers.enable_v2_dtype_behavior', 'tf.keras.losses.cosine': 'tf.keras.losses.cosine_similarity', 'tf.keras.losses.cosine_proximity': diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 7eaeac0a4ef..ce90b5b60c1 100755 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -162,6 +162,19 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): print("path_prefix was specified to tf_workspace but is no longer used " + "and will be removed in the future.") + TFRT_COMMIT = "91370b32f683333d39adb736f81463b6418d6775" + TFRT_SHA256 = "0105c47c78bba61a366823ce9b6ac221dd92dc7b09a8927ae13e7cc334598ea6" + TFRT_URLS = [ + "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.zip".format(commit = TFRT_COMMIT), + "https://github.com/tensorflow/runtime/archive/{commit}.zip".format(commit = TFRT_COMMIT), + ] + tf_http_archive( + name = "tfrt", + sha256 = TFRT_SHA256, + strip_prefix = "runtime-" + TFRT_COMMIT, + urls = TFRT_URLS, + ) + tf_http_archive( name = "XNNPACK", sha256 = "41a0a396a5a9cb2171c1c7f6d7689316beaa6f638663161fc7f86450eba33070", @@ -666,8 +679,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""): ) # Check out LLVM and MLIR from llvm-project. 
- LLVM_COMMIT = "ef06016d73390d5b380018cc0d16003b4ed4a35a" - LLVM_SHA256 = "9e5a58b59326f06374ad7d380dd83bf9d5770493610e824c1c0bbf1ca76f5385" + LLVM_COMMIT = "52eb2f65a7d28bb225ca8a0bc8c4090d324e22d9" + LLVM_SHA256 = "ee10022c1b0f6f07cc9fc22ff4c4ec97e31d8d11e08119f0f084e238547df340" LLVM_URLS = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), diff --git a/third_party/hexagon/workspace.bzl b/third_party/hexagon/workspace.bzl index 0f1c63e6a1e..1a682f0e8ad 100644 --- a/third_party/hexagon/workspace.bzl +++ b/third_party/hexagon/workspace.bzl @@ -2,7 +2,7 @@ load("//third_party:repo.bzl", "third_party_http_archive") -# Note: Use libhexagon_nn_skel version 1.14 Only with the current version. +# Note: Use libhexagon_nn_skel version 1.17 Only with the current version. # This comment will be updated with compatible version. 
def repo(): third_party_http_archive( diff --git a/third_party/mlir/BUILD b/third_party/mlir/BUILD index 30e26462133..ab7734ef17d 100644 --- a/third_party/mlir/BUILD +++ b/third_party/mlir/BUILD @@ -127,7 +127,6 @@ cc_library( hdrs = [ "include/mlir-c/Core.h", "include/mlir/EDSC/Builders.h", - "include/mlir/EDSC/Intrinsics.h", ], includes = ["include"], deps = [ @@ -3071,6 +3070,7 @@ cc_library( ":Support", ":Transforms", ":VectorToLLVM", + ":VectorToLoops", "@llvm-project//llvm:core", "@llvm-project//llvm:support", ], diff --git a/third_party/mlir/test.BUILD b/third_party/mlir/test.BUILD index e180ec34ef3..43cc152b601 100644 --- a/third_party/mlir/test.BUILD +++ b/third_party/mlir/test.BUILD @@ -86,6 +86,14 @@ gentbl( "-gen-enum-defs", "lib/Dialect/Test/TestOpEnums.cpp.inc", ), + ( + "-gen-struct-attr-decls", + "lib/Dialect/Test/TestOpStructs.h.inc", + ), + ( + "-gen-struct-attr-defs", + "lib/Dialect/Test/TestOpStructs.cpp.inc", + ), ( "-gen-rewriters", "lib/Dialect/Test/TestPatterns.inc", diff --git a/third_party/ruy/workspace.bzl b/third_party/ruy/workspace.bzl index e408f74ffb4..bbd792fd87d 100644 --- a/third_party/ruy/workspace.bzl +++ b/third_party/ruy/workspace.bzl @@ -5,11 +5,11 @@ load("//third_party:repo.bzl", "third_party_http_archive") def repo(): third_party_http_archive( name = "ruy", - sha256 = "fe8345f521bb378745ebdd0f8c5937414849936851d2ec2609774eb2d7098e54", - strip_prefix = "ruy-9f53ba413e6fc879236dcaa3e008915973d67a4f", + sha256 = "51c1492196cdd6fc524dd8b539de5d644bbb436699fab3908585a575e347c789", + strip_prefix = "ruy-4bdb31ab484e624deef9620ecde2156ca17f6567", urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/ruy/archive/9f53ba413e6fc879236dcaa3e008915973d67a4f.zip", - "https://github.com/google/ruy/archive/9f53ba413e6fc879236dcaa3e008915973d67a4f.zip", + "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/ruy/archive/4bdb31ab484e624deef9620ecde2156ca17f6567.zip", + 
"https://github.com/google/ruy/archive/4bdb31ab484e624deef9620ecde2156ca17f6567.zip", ], build_file = "//third_party/ruy:BUILD", ) diff --git a/third_party/toolchains/remote_config/configs.bzl b/third_party/toolchains/remote_config/configs.bzl index ff03da04fa1..4098e5f1580 100644 --- a/third_party/toolchains/remote_config/configs.bzl +++ b/third_party/toolchains/remote_config/configs.bzl @@ -55,7 +55,7 @@ def initialize_rbe_configs(): ) tensorflow_rbe_config( - name = "ubuntu16.04-py3-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0", + name = "ubuntu16.04-clang_manylinux2010-cuda10.1-cudnn7-tensorrt6.0", compiler = "/clang_r42cab985fd95ba4f3f290e7bb26b93805edb447d/bin/clang", cuda_version = "10.1", cudnn_version = "7",