diff --git a/RELEASE.md b/RELEASE.md
index 1011610350d..af3acb177ce 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -42,6 +42,10 @@
 * Removed deprecated `Interpreter::UseNNAPI(bool)` C++ API.
 * Use `NnApiDelegate()` and related delegate configuration methods directly.
+* TF Core:
+  * Corrected higher-order gradients of control flow constructs (`tf.cond`,
+    `tf.while_loop`, and compositions like `tf.foldl`) computed with
+    `tf.GradientTape` inside a `tf.function`.
 
 ## Thanks to our Contributors
 
diff --git a/tensorflow/c/eager/c_api_test.cc b/tensorflow/c/eager/c_api_test.cc
index fd208c6770d..0f5f494e5e2 100644
--- a/tensorflow/c/eager/c_api_test.cc
+++ b/tensorflow/c/eager/c_api_test.cc
@@ -769,7 +769,7 @@ void Execute_MatMul_CPU_Runtime_Error(bool async) {
   TF_Tensor* t = TFE_TensorHandleResolve(retvals[0], status);
   EXPECT_NE(TF_OK, TF_GetCode(status));
   EXPECT_EQ(nullptr, t);
-  const char* msg = "Matrix size-incompatible: In[0]: [2,2], In[1]: [3,2]";
+  const char* msg = "In[0] mismatch In[1] shape: 2 vs. 3: [2,2] [3,2]";
   EXPECT_TRUE(strstr(TF_Message(status), msg) != nullptr)
       << TF_Message(status);
   // Since error is not cleared, the following copy with correct device will
diff --git a/tensorflow/compiler/jit/xla_launch_util.cc b/tensorflow/compiler/jit/xla_launch_util.cc
index a0e60b1eafe..66922f901a1 100644
--- a/tensorflow/compiler/jit/xla_launch_util.cc
+++ b/tensorflow/compiler/jit/xla_launch_util.cc
@@ -583,7 +583,11 @@ XlaComputationLaunchContext::BuildXlaCompilerArguments(
     XlaCompiler::Argument& arg = out[input_num];
     if (absl::c_binary_search(must_be_constant_idxs, input_num)) {
       // Handles compile-time constants.
-      TF_RET_CHECK(input->dtype() != DT_RESOURCE);
+
+      // TODO(b/157241314): Support constants located in resource variables.
+      TF_RET_CHECK(input->dtype() != DT_RESOURCE)
+          << "tf2xla bridge does not support must-be-constants located in "
+             "resource variables; try moving them to a tensor";
       arg.kind = XlaCompiler::Argument::kConstant;
       arg.type = input->dtype();
       arg.shape = input->shape();
diff --git a/tensorflow/compiler/mlir/hlo/BUILD b/tensorflow/compiler/mlir/hlo/BUILD
index a719f303d3d..e1b81133724 100644
--- a/tensorflow/compiler/mlir/hlo/BUILD
+++ b/tensorflow/compiler/mlir/hlo/BUILD
@@ -517,6 +517,15 @@ cc_library(
     ],
 )
 
+cc_library(
+    name = "map_chlo_to_hlo_op",
+    hdrs = ["include/mlir-hlo/Dialect/mhlo/transforms/map_chlo_to_hlo_op.h"],
+    deps = [
+        ":hlo",
+        "@llvm-project//mlir:IR",
+    ],
+)
+
 cc_library(
     name = "map_hlo_to_lhlo_op",
     hdrs = ["include/mlir-hlo/Dialect/mhlo/transforms/map_hlo_to_lhlo_op.h"],
@@ -606,9 +615,11 @@ cc_library(
     ],
     deps = [
         ":hlo",
+        ":map_chlo_to_hlo_op",
         "@llvm-project//llvm:Support",
         "@llvm-project//mlir:IR",
         "@llvm-project//mlir:Pass",
+        "@llvm-project//mlir:SCFDialect",
         "@llvm-project//mlir:Shape",
         "@llvm-project//mlir:StandardOps",
         "@llvm-project//mlir:Transforms",
@@ -893,6 +904,7 @@ cc_library(
     deps = [
         ":chlo_legalize_to_hlo_inc_gen",
         ":hlo",
+        ":map_chlo_to_hlo_op",
         "@llvm-project//mlir:IR",
         "@llvm-project//mlir:SCFDialect",
         "@llvm-project//mlir:Shape",
diff --git a/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/map_chlo_to_hlo_op.h b/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/map_chlo_to_hlo_op.h
new file mode 100644
index 00000000000..316e65076ae
--- /dev/null
+++ b/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/mhlo/transforms/map_chlo_to_hlo_op.h
@@ -0,0 +1,97 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_MLIR_HLO_INCLUDE_MLIR_HLO_DIALECT_MHLO_TRANSFORMS_MAP_CHLO_TO_MHLO_OP_H_
+#define TENSORFLOW_COMPILER_MLIR_HLO_INCLUDE_MLIR_HLO_DIALECT_MHLO_TRANSFORMS_MAP_CHLO_TO_MHLO_OP_H_
+
+#include <type_traits>
+
+#include "mlir-hlo/Dialect/mhlo/IR/chlo_ops.h"
+#include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h"
+#include "mlir/IR/PatternMatch.h"
+
+namespace mlir {
+namespace chlo {
+
+struct HloComplexAdaptor {
+  static mhlo::ComplexOp CreateOp(BroadcastComplexOp from_op, Type result_type,
+                                  Value broadcasted_lhs, Value broadcasted_rhs,
+                                  OpBuilder &builder) {
+    return builder.create<mhlo::ComplexOp>(from_op.getLoc(), result_type,
+                                           broadcasted_lhs, broadcasted_rhs);
+  }
+};
+template <typename FromOpTy, typename ToOpTy>
+struct HloBinaryElementwiseAdaptor {
+  static ToOpTy CreateOp(FromOpTy from_op, Type result_type,
+                         Value broadcasted_lhs, Value broadcasted_rhs,
+                         OpBuilder &builder) {
+    return builder.create<ToOpTy>(from_op.getLoc(), result_type,
+                                  broadcasted_lhs, broadcasted_rhs);
+  }
+};
+struct HloCompareAdaptor {
+  static mhlo::CompareOp CreateOp(BroadcastCompareOp from_op, Type result_type,
+                                  Value broadcasted_lhs, Value broadcasted_rhs,
+                                  OpBuilder &builder) {
+    return builder.create<mhlo::CompareOp>(
+        from_op.getLoc(), result_type, broadcasted_lhs, broadcasted_rhs,
+        from_op.comparison_direction(), from_op.compare_typeAttr());
+  }
+};
+
+// Populate a pattern for each Broadcasting CHlo op. This requires the pattern
+// to take a ChloOpTy, MhloOpTy, and an Adaptor as templated values.
+template
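
A minimal sketch (not part of the patch) of the behavior the RELEASE.md entry above describes: a second-order gradient of a control flow construct, taken with nested `tf.GradientTape`s inside a `tf.function`. The function name and the cubic/quadratic branches are illustrative assumptions, not from the patch.

import tensorflow as tf

# Hypothetical repro for the RELEASE.md note: higher-order gradients of
# tf.cond computed with tf.GradientTape inside a tf.function. The release
# note says these were previously computed incorrectly.
@tf.function
def second_derivative(x):
  with tf.GradientTape() as outer:
    outer.watch(x)
    with tf.GradientTape() as inner:
      inner.watch(x)
      # Control flow construct: y = x**3 when x > 0, else y = x**2.
      y = tf.cond(x > 0, lambda: x ** 3, lambda: x ** 2)
    dy_dx = inner.gradient(y, x)   # first-order gradient through tf.cond
  return outer.gradient(dy_dx, x)  # second-order gradient

print(second_derivative(tf.constant(2.0)))  # d2(x**3)/dx2 = 6*x = 12.0

With the fix in place, this traced result should match the eager (non-`tf.function`) computation; the same pattern applies to `tf.while_loop` and compositions such as `tf.foldl`.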