Use "empty" instead of "size"-based checks, and replace C-style casts by static_cast.

PiperOrigin-RevId: 233247794
This commit is contained in:
A. Unique TensorFlower 2019-02-09 19:33:32 -08:00 committed by TensorFlower Gardener
parent bce5bff90a
commit ead36c50c1
6 changed files with 20 additions and 18 deletions

View File

@@ -222,7 +222,7 @@ tensorflow::Status ConvertCalibGraphToInferGraph(
cres->thr_->join();
const auto& calibration_table =
cres->calibrator_->getCalibrationTableAsString();
if (!calibration_table.size()) {
if (calibration_table.empty()) {
LOG(ERROR) << "Calibration table is empty";
return tensorflow::errors::Unknown(
"Calibration table is missing. This shouldn't have happened!");
@@ -662,8 +662,8 @@ tensorflow::Status CreateTRTNode(const std::vector<EngineInfo>& infos, int pos,
info.use_calibration,
/*convert_successfully=*/nullptr));
TrtUniquePtrType<nvinfer1::IHostMemory> engine_data(engine->serialize());
segment_string =
string((const char*)engine_data->data(), engine_data->size());
segment_string = string(static_cast<const char*>(engine_data->data()),
engine_data->size());
if (calibrate_int8) {
// See above comment about why not putting this inside the 'else' branch.
segment_string = info.segment_graph_def.SerializeAsString();

View File

@@ -2106,7 +2106,7 @@ tensorflow::Status ConvertSqueeze(OpConverterParams* params) {
// Mark axes to remove by setting them to 0.
TFAttrs attrs(node_def);
auto squeeze_dims = attrs.get<std::vector<int>>("squeeze_dims");
if (squeeze_dims.size() == 0) {
if (squeeze_dims.empty()) {
return tensorflow::errors::Unimplemented(
"Squeeze is only implemented for explicit dims, at ", node_def.name());
}
@@ -2274,7 +2274,7 @@ tensorflow::Status ConvertStridedSlice(OpConverterParams* params) {
pad_dims.push_back(i);
}
}
if (pad_dims.size() == 0) {
if (pad_dims.empty()) {
// No dimensions are changed. We could create a padding layer anyway with
// values of 0.
if (params->validation_only) return Status::OK();
@@ -3138,7 +3138,7 @@ tensorflow::Status ConvertPad(OpConverterParams* params) {
}
// No padding at all, we should exit
if (pad_index.size() == 0) {
if (pad_index.empty()) {
params->outputs->push_back(inputs.at(0));
return tensorflow::Status::OK();
}

View File

@@ -87,7 +87,7 @@ void TRTOptimizationPass::PrintDebugInfo(
LOG(INFO) << offset << "type = " << cluster->type();
LOG(INFO) << offset << "num warmup steps = " << cluster->NumWarmupSteps();
const auto dev_names = cluster->GetDeviceNames();
if (dev_names.size()) {
if (!dev_names.empty()) {
LOG(INFO) << offset << " Device names:";
for (const auto s : dev_names) {
LOG(INFO) << offset2 << s;
@@ -103,7 +103,7 @@ void TRTOptimizationPass::PrintDebugInfo(
}
const auto dev_props = cluster->GetDevices();
if (dev_props.size()) {
if (!dev_props.empty()) {
LOG(INFO) << offset << "Device properties:";
for (auto k : dev_props) {
LOG(INFO) << offset2 << k.first;
@@ -131,7 +131,7 @@ void TRTOptimizationPass::PrintDebugInfo(
}
}
LOG(INFO) << "item: " << item.id;
if (item.feed.size()) {
if (!item.feed.empty()) {
LOG(INFO) << offset << "Feeds :";
for (const auto& f : item.feed) {
const auto& shape = f.second.shape();
@@ -140,7 +140,7 @@ void TRTOptimizationPass::PrintDebugInfo(
} else {
LOG(INFO) << offset << "No Feeds";
}
if (item.fetch.size()) {
if (!item.fetch.empty()) {
LOG(INFO) << offset << "Fetches :";
for (const auto& f : item.fetch) {
LOG(INFO) << offset2 << f;
@@ -149,7 +149,7 @@ void TRTOptimizationPass::PrintDebugInfo(
LOG(INFO) << offset << "No Fetches";
}
if (item.init_ops.size()) {
if (!item.init_ops.empty()) {
LOG(INFO) << offset << "init ops :";
for (const auto& f : item.init_ops) {
LOG(INFO) << offset2 << f;
@@ -160,7 +160,7 @@ void TRTOptimizationPass::PrintDebugInfo(
LOG(INFO) << "Save Op = " << item.save_op;
LOG(INFO) << "Restore Op = " << item.restore_op;
LOG(INFO) << "save_restore_loc_tensor = " << item.save_restore_loc_tensor;
if (item.keep_ops.size()) {
if (!item.keep_ops.empty()) {
LOG(INFO) << offset << "keep ops :";
for (const auto& f : item.keep_ops) {
LOG(INFO) << offset2 << f;
@@ -197,7 +197,7 @@ tensorflow::Status TRTOptimizationPass::Optimize(
PrintDebugInfo(cluster, item);
}
int max_dim = -1;
if (item.feed.size()) {
if (!item.feed.empty()) {
for (const auto& f : item.feed) {
const auto& shape = f.second.shape();
if (shape.dims() > 0) {

View File

@@ -135,7 +135,7 @@ void TRTInt8Calibrator::setDone() {
void TRTInt8Calibrator::writeCalibrationCache(const void* ptr,
std::size_t length) {
calibration_table_ = string((const char*)ptr, length);
calibration_table_ = string(static_cast<const char*>(ptr), length);
VLOG(1) << "Got calibration data for " << engine_name_ << " @" << ptr
<< " length=" << length;
}

View File

@@ -48,7 +48,7 @@ Status TRTCalibrationResource::SerializeToString(string* serialized) {
calibrator_->waitAndSetDone();
thr_->join();
*serialized = calibrator_->getCalibrationTableAsString();
if (!serialized->size()) {
if (serialized->empty()) {
return tensorflow::errors::Unknown("Calibration table is empty.");
}
return Status::OK();

View File

@@ -400,7 +400,7 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
module->set_config(config);
}
if (backend_config != "") {
if (!backend_config.empty()) {
// Set backend configuration if it is given.
HloInstruction* instruction =
module->entry_computation()->root_instruction();
@@ -409,9 +409,10 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
auto executable =
test_runner_.CreateExecutable(std::move(module), run_hlo_passes);
if (!executable.ok())
if (!executable.ok()) {
return ::testing::AssertionFailure()
<< executable.status().error_message();
}
executables[i] = std::move(executable.ValueOrDie());
}
@@ -419,8 +420,9 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
auto output =
test_runner_.Execute(std::move(executables[i]), fake_argument_ptrs[i],
/*profile=*/&((*profiles)[i]));
if (!output.ok())
if (!output.ok()) {
return ::testing::AssertionFailure() << output.status().error_message();
}
}
return ::testing::AssertionSuccess();