Fix mismatched argument comments to match parameter names

PiperOrigin-RevId: 178617543
Author: A. Unique TensorFlower, 2017-12-11 07:49:41 -08:00; committed by TensorFlower Gardener
parent 22e0870f62
commit 309f7e29a6
5 changed files with 9 additions and 9 deletions
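
For context: TensorFlow and XLA annotate non-obvious arguments with /*param_name=*/value comments. These are ordinary comments, invisible to the compiler, so after a parameter rename a stale comment keeps compiling while misleading readers; tooling such as clang-tidy's bugprone-argument-comment check can flag comments whose name no longer matches the declared parameter. A minimal, self-contained sketch of the convention (hypothetical function, not from this diff):

  // Hypothetical declaration used only to illustrate the convention.
  void Resize(int width, int height);

  void Demo() {
    Resize(/*width=*/640, /*height=*/480);  // Names match the parameters: fine.
    Resize(/*w=*/640, /*h=*/480);           // Compiles, but the mismatched names
                                            // are exactly what this commit fixes.
  }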

@@ -515,7 +515,7 @@ TYPED_TEST(LiteralUtilTestTemplated, Relayout2x2) {
 TEST_F(LiteralUtilTest, ReshapeR0) {
   auto original = Literal::CreateR0<float>(1.7f);
-  auto reshape = original->Reshape(/*shape=*/{}).ConsumeValueOrDie();
+  auto reshape = original->Reshape(/*dimensions=*/{}).ConsumeValueOrDie();
   EXPECT_EQ(*original, *reshape);
 }
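
The corrected comment matches the parameter name in Literal::Reshape's declaration. As a cross-check, the declaration in the XLA sources of this era read approximately as follows (reproduced from memory; treat the exact return and argument types as an assumption):

  StatusOr<std::unique_ptr<Literal>> Reshape(
      tensorflow::gtl::ArraySlice<int64> dimensions) const;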

@@ -602,7 +602,7 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
             llvm::Function * ir_function,
             ir_emitter.EmitComputation(
                 embedded_computation, embedded_computation->name(),
-                /*is_entry_computation=*/computation_is_parallel,
+                /*is_top_level_computation=*/computation_is_parallel,
                 /*instruction_order=*/nullptr));
         // If this computation is parallel, remember it in the function name map.
         // This way we know what function to execute when we try to run code for
@@ -684,7 +684,7 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
         ir_emitter
             .EmitComputation(embedded_computation,
                              embedded_computation->name(),
-                             /*is_entry_computation=*/false,
+                             /*is_top_level_computation=*/false,
                              &module_sequence.at(embedded_computation))
             .status());
   }
@ -693,7 +693,7 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
TF_ASSIGN_OR_RETURN(
llvm::Function * entry_function,
ir_emitter.EmitComputation(computation, function_name_prefix,
/*is_entry_computation=*/true,
/*is_top_level_computation=*/true,
&module_sequence.at(computation)));
string function_name = llvm_ir::AsString(entry_function->getName());
@@ -858,7 +858,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
         ir_emitter
             .EmitComputation(embedded_computation,
                              embedded_computation->name(),
-                             /*is_entry_computation=*/false,
+                             /*is_top_level_computation=*/false,
                              &module_sequence.at(embedded_computation))
             .status());
   }
@@ -866,7 +866,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
   TF_ASSIGN_OR_RETURN(
       llvm::Function * entry_function,
       ir_emitter.EmitComputation(computation, entry_point_name,
-                                 /*is_entry_computation=*/true,
+                                 /*is_top_level_computation=*/true,
                                  &module_sequence.at(computation)));
   CHECK(entry_function->getName() == llvm_ir::AsStringRef(entry_point_name));
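
All five call sites above track a single rename of EmitComputation's bool parameter from is_entry_computation to is_top_level_computation. A sketch of the declaration they now agree with (a hypothetical reconstruction from the call sites; only the parameter name is confirmed by this diff):

  StatusOr<llvm::Function*> EmitComputation(
      HloComputation* computation, const string& function_name_prefix,
      bool is_top_level_computation,
      std::vector<const HloInstruction*>* instruction_order);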

@@ -647,7 +647,7 @@ class Conv3DBackpropInputOp<GPUDevice, T> : public OpKernel {
         {{filter_size[0], filter_size[1], filter_size[2]}},
         // TODO(yangzihao): Send in arbitrary dilation rates after the dilated
         // conv is supported.
-        /*dilations=*/{{1, 1, 1}},
+        /*dilation=*/{{1, 1, 1}},
         {{strides[0], strides[1], strides[2]}},
         {{padding_planes, padding_rows, padding_cols}},
         dtype,

@@ -379,7 +379,7 @@ struct LaunchConvOp<GPUDevice, T> {
         {{filter_planes, filter_rows, filter_cols}},
         // TODO(yangzihao): Send in arbitrary dilation rates after the dilated
         // conv is supported.
-        /*dilations=*/{{1, 1, 1}},
+        /*dilation=*/{{1, 1, 1}},
         {{strides[0], strides[1], strides[2]}},
         {{pad_planes, pad_rows, pad_cols}},
         dtype,
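
The two 3-D convolution kernels above pass the same positional argument bundle, so the fix is identical in both: the callee's parameter is evidently named dilation (singular), not dilations. The hazard is the same as elsewhere in this commit; a self-contained illustration (hypothetical names, not the real launch helper):

  #include <array>

  // Hypothetical helper standing in for the real conv launcher.
  void LaunchConv3D(std::array<int, 3> dilation, std::array<int, 3> strides);

  void Example() {
    // The comment must name the parameter, dilation, not the op-level
    // attribute, dilations; the compiler cannot tell the difference.
    LaunchConv3D(/*dilation=*/{{1, 1, 1}}, /*strides=*/{{1, 1, 1}});
  }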

@@ -561,7 +561,7 @@ static bool TensorOpMathEnabled() {
   static bool is_enabled = [] {
     bool ret;
     TF_CHECK_OK(tensorflow::ReadBoolFromEnvVar("TF_DISABLE_TENSOR_OP_MATH",
-                                               /*default=*/false, &ret));
+                                               /*default_val=*/false, &ret));
     return !ret;
   }();
   return is_enabled;
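
Here the comment is brought in line with ReadBoolFromEnvVar's declared parameter, default_val. For reference, the declaration in tensorflow/core/util/env_var.h read approximately as follows in this era (reproduced from memory; treat the exact spelling as an assumption):

  Status ReadBoolFromEnvVar(StringPiece env_var_name, bool default_val,
                            bool* value);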