diff --git a/tensorflow/compiler/xla/literal_util_test.cc b/tensorflow/compiler/xla/literal_util_test.cc
index 816bb3c549e..7ff64c41341 100644
--- a/tensorflow/compiler/xla/literal_util_test.cc
+++ b/tensorflow/compiler/xla/literal_util_test.cc
@@ -515,7 +515,7 @@ TYPED_TEST(LiteralUtilTestTemplated, Relayout2x2) {
 
 TEST_F(LiteralUtilTest, ReshapeR0) {
   auto original = Literal::CreateR0<float>(1.7f);
-  auto reshape = original->Reshape(/*shape=*/{}).ConsumeValueOrDie();
+  auto reshape = original->Reshape(/*dimensions=*/{}).ConsumeValueOrDie();
 
   EXPECT_EQ(*original, *reshape);
 }
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
index a476a75027b..44d80d75f57 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
@@ -602,7 +602,7 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
           llvm::Function * ir_function,
           ir_emitter.EmitComputation(
               embedded_computation, embedded_computation->name(),
-              /*is_entry_computation=*/computation_is_parallel,
+              /*is_top_level_computation=*/computation_is_parallel,
               /*instruction_order=*/nullptr));
       // If this computation is parallel, remember it in the function name map.
       // This way we know what function to execute when we try to run code for
@@ -684,7 +684,7 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
           ir_emitter
               .EmitComputation(embedded_computation,
                                embedded_computation->name(),
-                               /*is_entry_computation=*/false,
+                               /*is_top_level_computation=*/false,
                                &module_sequence.at(embedded_computation))
               .status());
     }
@@ -693,7 +693,7 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
     TF_ASSIGN_OR_RETURN(
         llvm::Function * entry_function,
         ir_emitter.EmitComputation(computation, function_name_prefix,
-                                   /*is_entry_computation=*/true,
+                                   /*is_top_level_computation=*/true,
                                    &module_sequence.at(computation)));
 
     string function_name = llvm_ir::AsString(entry_function->getName());
@@ -858,7 +858,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
          ir_emitter
              .EmitComputation(embedded_computation,
                               embedded_computation->name(),
-                              /*is_entry_computation=*/false,
+                              /*is_top_level_computation=*/false,
                               &module_sequence.at(embedded_computation))
              .status());
    }
@@ -866,7 +866,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
    TF_ASSIGN_OR_RETURN(
        llvm::Function * entry_function,
        ir_emitter.EmitComputation(computation, entry_point_name,
-                                  /*is_entry_computation=*/true,
+                                  /*is_top_level_computation=*/true,
                                   &module_sequence.at(computation)));
 
    CHECK(entry_function->getName() == llvm_ir::AsStringRef(entry_point_name));
diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc
index 4d0f1ab317a..3650ab53b25 100644
--- a/tensorflow/core/kernels/conv_grad_ops_3d.cc
+++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc
@@ -647,7 +647,7 @@ class Conv3DBackpropInputOp<GPUDevice, T> : public OpKernel {
         {{filter_size[0], filter_size[1], filter_size[2]}},
         // TODO(yangzihao): Send in arbitrary dilation rates after the dilated
         // conv is supported.
-        /*dilations=*/{{1, 1, 1}},
+        /*dilation=*/{{1, 1, 1}},
         {{strides[0], strides[1], strides[2]}},
         {{padding_planes, padding_rows, padding_cols}},
         dtype,
diff --git a/tensorflow/core/kernels/conv_ops_3d.cc b/tensorflow/core/kernels/conv_ops_3d.cc
index 39202d73348..21c84b2a0ed 100644
--- a/tensorflow/core/kernels/conv_ops_3d.cc
+++ b/tensorflow/core/kernels/conv_ops_3d.cc
@@ -379,7 +379,7 @@ struct LaunchConvOp<GPUDevice, T> {
        {{filter_planes, filter_rows, filter_cols}},
        // TODO(yangzihao): Send in arbitrary dilation rates after the dilated
        // conv is supported.
-       /*dilations=*/{{1, 1, 1}},
+       /*dilation=*/{{1, 1, 1}},
        {{strides[0], strides[1], strides[2]}},
        {{pad_planes, pad_rows, pad_cols}},
        dtype,
diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc
index d78362d4fba..1e26f53ae1b 100644
--- a/tensorflow/stream_executor/cuda/cuda_dnn.cc
+++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc
@@ -561,7 +561,7 @@ static bool TensorOpMathEnabled() {
   static bool is_enabled = [] {
     bool ret;
     TF_CHECK_OK(tensorflow::ReadBoolFromEnvVar("TF_DISABLE_TENSOR_OP_MATH",
-                                               /*default=*/false, &ret));
+                                               /*default_val=*/false, &ret));
     return !ret;
   }();
   return is_enabled;