diff --git a/tensorflow/core/common_runtime/shape_refiner_test.cc b/tensorflow/core/common_runtime/shape_refiner_test.cc index 5a1ea0e0658..3812a8c181d 100644 --- a/tensorflow/core/common_runtime/shape_refiner_test.cc +++ b/tensorflow/core/common_runtime/shape_refiner_test.cc @@ -980,10 +980,10 @@ TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackInt64) { InputList inputs{ // clang-format off - Input(ops::Const(root, 10LL)), - Input(ops::Const(root, 20LL)), + Input(ops::Const(root, int64{10})), + Input(ops::Const(root, int64{20})), Input(Output(scalar_non_const)), - Input(ops::Const(root, 1LL << 40)), + Input(ops::Const(root, int64{1} << 40)), }; // clang-format on auto pack = ops::Stack(root, inputs); TF_ASSERT_OK(root.status()); @@ -1008,8 +1008,8 @@ TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackUnknownDim) { Scope root = Scope::NewRootScope(); InputList inputs{ - Input(ops::Const(root, 10LL)), - Input(ops::Const(root, -1LL)), + Input(ops::Const(root, int64{10})), + Input(ops::Const(root, int64{-1})), }; auto pack = ops::Stack(root, inputs); TF_ASSERT_OK(root.status()); @@ -1035,8 +1035,8 @@ TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackInvalidInput) { // Inputs are length 2 vectors instead of scalars. 
InputList inputs{ - Input(ops::Const(root, {10LL, 20LL})), - Input(ops::Const(root, {10LL, 21LL})), + Input(ops::Const(root, {int64{10}, int64{20}})), + Input(ops::Const(root, {int64{10}, int64{21}})), }; auto pack = ops::Stack(root, inputs); TF_ASSERT_OK(root.status()); diff --git a/tensorflow/core/debug/debug_io_utils.cc b/tensorflow/core/debug/debug_io_utils.cc index 643dde7ad8c..c4e99adf0e2 100644 --- a/tensorflow/core/debug/debug_io_utils.cc +++ b/tensorflow/core/debug/debug_io_utils.cc @@ -395,11 +395,12 @@ Status DebugIO::PublishDebugMetadata( } else if (absl::StartsWith(absl::AsciiStrToLower(url), kFileURLScheme)) { const string dump_root_dir = url.substr(strlen(kFileURLScheme)); const string core_metadata_path = AppendTimestampToFilePath( - io::JoinPath( - dump_root_dir, - strings::StrCat(DebugNodeKey::kMetadataFilePrefix, - DebugIO::kCoreMetadataTag, "sessionrun", - strings::Printf("%.14lld", session_run_index))), + io::JoinPath(dump_root_dir, + strings::StrCat( + DebugNodeKey::kMetadataFilePrefix, + DebugIO::kCoreMetadataTag, "sessionrun", + strings::Printf("%.14lld", static_cast<long long>( + session_run_index)))), Env::Default()->NowMicros()); status.Update(DebugFileIO::DumpEventProtoToFile( event, string(io::Dirname(core_metadata_path)), diff --git a/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc b/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc index c023d5ebe48..052d75b60ef 100644 --- a/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc +++ b/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc @@ -557,7 +557,7 @@ tensorflow::Status EagerServiceImpl::GetServerContext( return errors::InvalidArgument(strings::Printf( "Unable to find a context_id matching the specified one " "(%llu). 
Perhaps the worker was restarted, or the context was GC'd?", - context_id)); + static_cast<unsigned long long>(context_id))); } *server_context = iter->second; diff --git a/tensorflow/core/distributed_runtime/graph_mgr.cc b/tensorflow/core/distributed_runtime/graph_mgr.cc index 2a98aa1a892..8239dbcc72d 100644 --- a/tensorflow/core/distributed_runtime/graph_mgr.cc +++ b/tensorflow/core/distributed_runtime/graph_mgr.cc @@ -303,7 +303,8 @@ Status GraphMgr::Register( // Inserts one item into table_. { mutex_lock l(mu_); - *graph_handle = strings::Printf("%016llx", ++next_id_); + *graph_handle = + strings::Printf("%016llx", static_cast<unsigned long long>(++next_id_)); item->handle = *graph_handle; CHECK(table_.insert({*graph_handle, item}).second); } diff --git a/tensorflow/core/kernels/data/batch_dataset_op.cc b/tensorflow/core/kernels/data/batch_dataset_op.cc index 14a5f9fc2a5..0d454a0abf2 100644 --- a/tensorflow/core/kernels/data/batch_dataset_op.cc +++ b/tensorflow/core/kernels/data/batch_dataset_op.cc @@ -54,7 +54,8 @@ class BatchDatasetOp::Dataset : public DatasetBase { input_(input), op_version_(op_version), traceme_metadata_( - {{"batch_size", strings::Printf("%lld", batch_size)}, + {{"batch_size", + strings::Printf("%lld", static_cast<long long>(batch_size))}, {"drop_remainder", drop_remainder ? "true" : "false"}, {"parallel_copy", parallel_copy ? "true" : "false"}}) { input_->Ref(); diff --git a/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc b/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc index 8704bc53d00..dbb3610fd66 100644 --- a/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc +++ b/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc @@ -100,7 +100,8 @@ class MapAndBatchDatasetOp::Dataset : public DatasetBase { traceme_metadata_( {{"autotune", num_parallel_calls == model::kAutotune ? 
"true" : "false"}, - {"batch_size", strings::Printf("%lld", batch_size)}, + {"batch_size", + strings::Printf("%lld", static_cast<long long>(batch_size))}, {"drop_remainder", drop_remainder ? "true" : "false"}}) { input_->Ref(); } @@ -285,8 +286,8 @@ class MapAndBatchDatasetOp::Dataset : public DatasetBase { } TraceMeMetadata GetTraceMeMetadata() const override { - int64 parallelism = -1; - int64 max_batch_results = -1; + long long parallelism = -1; // NOLINT + long long max_batch_results = -1; // NOLINT // NOTE: We only set the parallelism value if the lock can be acquired // right away to avoid introducing tracing overhead. if (mu_->try_lock()) { diff --git a/tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.cc b/tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.cc index 4e2186b93b5..09344881f7d 100644 --- a/tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.cc +++ b/tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.cc @@ -107,8 +107,10 @@ class ParallelInterleaveDatasetOp::Dataset : public DatasetBase { output_types_(output_types), output_shapes_(output_shapes), traceme_metadata_( - {{"block_length", strings::Printf("%lld", block_length)}, - {"cycle_length", strings::Printf("%lld", cycle_length)}, + {{"block_length", + strings::Printf("%lld", static_cast<long long>(block_length))}, + {"cycle_length", + strings::Printf("%lld", static_cast<long long>(cycle_length))}, {"deterministic", deterministic.IsDeterministic() || deterministic.IsDefault() ? 
"true" diff --git a/tensorflow/core/kernels/data/experimental/rebatch_dataset_op.cc b/tensorflow/core/kernels/data/experimental/rebatch_dataset_op.cc index 78a74f97685..5f224b8a5f4 100644 --- a/tensorflow/core/kernels/data/experimental/rebatch_dataset_op.cc +++ b/tensorflow/core/kernels/data/experimental/rebatch_dataset_op.cc @@ -62,7 +62,8 @@ class RebatchDatasetOp : public UnaryDatasetOpKernel { output_types_(output_types), output_shapes_(output_shapes), traceme_metadata_( - {{"num_replicas", strings::Printf("%lld", num_replicas)}}) { + {{"num_replicas", strings::Printf("%lld", static_cast<long long>( + num_replicas))}}) { input_->Ref(); } diff --git a/tensorflow/core/kernels/data/experimental/snapshot_dataset_op.cc b/tensorflow/core/kernels/data/experimental/snapshot_dataset_op.cc index a07228aa1e4..d7eff8df710 100644 --- a/tensorflow/core/kernels/data/experimental/snapshot_dataset_op.cc +++ b/tensorflow/core/kernels/data/experimental/snapshot_dataset_op.cc @@ -1206,7 +1206,9 @@ class SnapshotDatasetOp : public UnaryDatasetOpKernel { string GetSnapshotFilename() { mutex_lock l(mu_); string snapshot_data_filename = io::JoinPath( - run_dir_, strings::Printf("%08llu.snapshot", next_file_index_)); + run_dir_, strings::Printf( + "%08llu.snapshot", + static_cast<unsigned long long>(next_file_index_))); next_file_index_++; return snapshot_data_filename; } diff --git a/tensorflow/core/kernels/data/interleave_dataset_op.cc b/tensorflow/core/kernels/data/interleave_dataset_op.cc index c389998066b..cb8423435f8 100644 --- a/tensorflow/core/kernels/data/interleave_dataset_op.cc +++ b/tensorflow/core/kernels/data/interleave_dataset_op.cc @@ -62,8 +62,10 @@ class InterleaveDatasetOp::Dataset : public DatasetBase { output_types_(output_types), output_shapes_(output_shapes), traceme_metadata_( - {{"block_length", strings::Printf("%lld", block_length)}, - {"cycle_length", strings::Printf("%lld", cycle_length)}}) { + {{"block_length", + strings::Printf("%lld", static_cast<long long>(block_length))}, + {"cycle_length", + 
strings::Printf("%lld", static_cast<long long>(cycle_length))}}) { input_->Ref(); } diff --git a/tensorflow/core/kernels/data/padded_batch_dataset_op.cc b/tensorflow/core/kernels/data/padded_batch_dataset_op.cc index e63680e3c9b..12800c27eff 100644 --- a/tensorflow/core/kernels/data/padded_batch_dataset_op.cc +++ b/tensorflow/core/kernels/data/padded_batch_dataset_op.cc @@ -61,7 +61,8 @@ class PaddedBatchDatasetOp::Dataset : public DatasetBase { input_(input), op_version_(op_version), traceme_metadata_( - {{"batch_size", strings::Printf("%lld", batch_size)}, + {{"batch_size", + strings::Printf("%lld", static_cast<long long>(batch_size))}, {"drop_remainder", drop_remainder ? "true" : "false"}}) { input_->Ref(); diff --git a/tensorflow/core/kernels/data/parallel_interleave_dataset_op.cc b/tensorflow/core/kernels/data/parallel_interleave_dataset_op.cc index 7d0529abedb..e6920d22d7f 100644 --- a/tensorflow/core/kernels/data/parallel_interleave_dataset_op.cc +++ b/tensorflow/core/kernels/data/parallel_interleave_dataset_op.cc @@ -172,8 +172,10 @@ class ParallelInterleaveDatasetOp::Dataset : public DatasetBase { traceme_metadata_( {{"autotune", num_parallel_calls == model::kAutotune ? "true" : "false"}, - {"block_length", strings::Printf("%lld", block_length)}, - {"cycle_length", strings::Printf("%lld", cycle_length)}, + {"block_length", + strings::Printf("%lld", static_cast<long long>(block_length))}, + {"cycle_length", + strings::Printf("%lld", static_cast<long long>(cycle_length))}, {"deterministic", deterministic.IsNondeterministic() ? 
"false" : "true"}}) { input_->Ref(); @@ -467,8 +469,8 @@ class ParallelInterleaveDatasetOp::Dataset : public DatasetBase { mu_->unlock(); } auto result = dataset()->traceme_metadata_; - result.push_back( std::make_pair("parallelism", strings::Printf("%lld", parallelism))); + result.push_back(std::make_pair( + "parallelism", + strings::Printf("%lld", static_cast<long long>(parallelism)))); return result; } diff --git a/tensorflow/core/kernels/data/parallel_map_dataset_op.cc b/tensorflow/core/kernels/data/parallel_map_dataset_op.cc index 22dbf5e9166..fcb83cbd03f 100644 --- a/tensorflow/core/kernels/data/parallel_map_dataset_op.cc +++ b/tensorflow/core/kernels/data/parallel_map_dataset_op.cc @@ -471,8 +471,9 @@ class ParallelMapIterator : public DatasetBaseIterator { result.push_back(std::make_pair("autotune", autotune_ ? "true" : "false")); result.push_back( std::make_pair("deterministic", deterministic_ ? "true" : "false")); - result.push_back( std::make_pair("parallelism", strings::Printf("%lld", parallelism))); + result.push_back(std::make_pair( + "parallelism", + strings::Printf("%lld", static_cast<long long>(parallelism)))); return result; } diff --git a/tensorflow/core/kernels/data/prefetch_dataset_op.cc b/tensorflow/core/kernels/data/prefetch_dataset_op.cc index fbad1bcbfae..27c2ca57854 100644 --- a/tensorflow/core/kernels/data/prefetch_dataset_op.cc +++ b/tensorflow/core/kernels/data/prefetch_dataset_op.cc @@ -278,11 +278,13 @@ class PrefetchDatasetOp::Dataset : public DatasetBase { mu_->unlock(); } data::TraceMeMetadata result; - result.push_back( std::make_pair("buffer_limit", strings::Printf("%lld", limit))); + result.push_back(std::make_pair( + "buffer_limit", + strings::Printf("%lld", static_cast<long long>(limit)))); if (dataset()->slack_period_ > 0) { - result.push_back( std::make_pair("slack", strings::Printf("%lld", slack_us_.load()))); + result.push_back(std::make_pair( + "slack", + strings::Printf("%lld", static_cast<long long>(slack_us_.load())))); } return result; } diff --git 
a/tensorflow/core/kernels/data/shard_dataset_op.cc b/tensorflow/core/kernels/data/shard_dataset_op.cc index 83668e43552..9d6f81ced96 100644 --- a/tensorflow/core/kernels/data/shard_dataset_op.cc +++ b/tensorflow/core/kernels/data/shard_dataset_op.cc @@ -48,8 +48,9 @@ class ShardDatasetOp::Dataset : public DatasetBase { input_(input), require_non_empty_(require_non_empty), traceme_metadata_( - {{"index", strings::Printf("%lld", index)}, - {"num_shards", strings::Printf("%lld", num_shards)}}) { + {{"index", strings::Printf("%lld", static_cast<long long>(index))}, + {"num_shards", + strings::Printf("%lld", static_cast<long long>(num_shards))}}) { input_->Ref(); } diff --git a/tensorflow/core/kernels/data/shuffle_dataset_op.cc b/tensorflow/core/kernels/data/shuffle_dataset_op.cc index 8c21187fee4..ce68f533664 100644 --- a/tensorflow/core/kernels/data/shuffle_dataset_op.cc +++ b/tensorflow/core/kernels/data/shuffle_dataset_op.cc @@ -108,7 +108,8 @@ class ShuffleDatasetOpBase::ShuffleDatasetBase : public DatasetBase { buffer_size_(buffer_size), count_(count), traceme_metadata_( - {{"buffer_size", strings::Printf("%lld", buffer_size)}}) { + {{"buffer_size", + strings::Printf("%lld", static_cast<long long>(buffer_size))}}) { input_->Ref(); } diff --git a/tensorflow/core/kernels/data/window_dataset_op.cc b/tensorflow/core/kernels/data/window_dataset_op.cc index 5c552c1472b..7db3f0a6a5b 100644 --- a/tensorflow/core/kernels/data/window_dataset_op.cc +++ b/tensorflow/core/kernels/data/window_dataset_op.cc @@ -54,9 +54,12 @@ class WindowDatasetOp::Dataset : public DatasetBase { output_dtypes_(input_->output_dtypes().size(), {DT_VARIANT}), output_shapes_(input_->output_shapes().size(), TensorShape({})), traceme_metadata_( - {{"window_size", strings::Printf("%lld", window_size)}, - {"window_shift", strings::Printf("%lld", window_shift)}, - {"window_stride", strings::Printf("%lld", window_stride)}}) { + {{"window_size", + strings::Printf("%lld", static_cast<long long>(window_size))}, + {"window_shift", + 
strings::Printf("%lld", static_cast<long long>(window_shift))}, + {"window_stride", strings::Printf("%lld", static_cast<long long>( + window_stride))}}) { input_->Ref(); } diff --git a/tensorflow/core/ops/math_grad.cc b/tensorflow/core/ops/math_grad.cc index 18f884da3c9..decafe59c59 100644 --- a/tensorflow/core/ops/math_grad.cc +++ b/tensorflow/core/ops/math_grad.cc @@ -78,7 +78,7 @@ REGISTER_OP_GRADIENT("Reciprocal", InvGrad); Status SquareGrad(const AttrSlice& attrs, FunctionDef* g) { // clang-format off return GradForUnaryCwise(g, { - FDH::Const("c", 2LL), + FDH::Const("c", int64{2}), {{"two"}, "Cast", {"c"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}, {{"x2"}, "Mul", {"x", "two"}, {}, {"dy"}}, // x * 2 {{"dx"}, "Mul", {"dy", "x2"}}, // dy * (x * 2) @@ -619,7 +619,7 @@ REGISTER_OP_GRADIENT("Xdivy", XdivyGrad); Status SquaredDifferenceGrad(const AttrSlice& attrs, FunctionDef* g) { // clang-format off return GradForBinaryCwise(g, { - FDH::Const("c", 2LL), + FDH::Const("c", int64{2}), {{"two"}, "Cast", {"c"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}, {{"x_sub_y"}, "Sub", {"x", "y"}}, {{"two_x_sub_y"}, "Mul", {"two", "x_sub_y"}}, // 2 * (x - y) diff --git a/tensorflow/core/platform/cloud/curl_http_request.cc b/tensorflow/core/platform/cloud/curl_http_request.cc index f27ce191e1a..12787032fdc 100644 --- a/tensorflow/core/platform/cloud/curl_http_request.cc +++ b/tensorflow/core/platform/cloud/curl_http_request.cc @@ -141,7 +141,8 @@ CurlHttpRequest::CurlHttpRequest(LibCurl* libcurl, Env* env) // TODO(b/74351157): Enable HTTP/2. // Set up the progress meter. 
- CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_NOPROGRESS, 0ULL)); + CHECK_CURL_OK( libcurl_->curl_easy_setopt(curl_, CURLOPT_NOPROGRESS, uint64{0})); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFODATA, this)); CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFOFUNCTION, &CurlHttpRequest::ProgressCallback)); diff --git a/tensorflow/core/platform/env.cc b/tensorflow/core/platform/env.cc index 20f37fe6cc5..b98fd3c4cb1 100644 --- a/tensorflow/core/platform/env.cc +++ b/tensorflow/core/platform/env.cc @@ -400,7 +400,7 @@ bool Env::CreateUniqueFileName(string* prefix, const string& suffix) { #else int32 pid = static_cast<int32>(getpid()); #endif - uint64 now_microsec = NowMicros(); + long long now_microsec = NowMicros(); // NOLINT *prefix += strings::Printf("%s-%x-%d-%llx", port::Hostname().c_str(), tid, pid, now_microsec); diff --git a/tensorflow/core/platform/numbers.cc b/tensorflow/core/platform/numbers.cc index 3b380b0e883..c8a73b05d44 100644 --- a/tensorflow/core/platform/numbers.cc +++ b/tensorflow/core/platform/numbers.cc @@ -439,7 +439,7 @@ string HumanReadableNum(int64 value) { value = -value; } if (value < 1000) { - Appendf(&s, "%lld", value); + Appendf(&s, "%lld", static_cast<long long>(value)); } else if (value >= static_cast<int64>(1e15)) { // Number bigger than 1E15; use that notation. Appendf(&s, "%0.3G", static_cast<double>(value)); @@ -472,7 +472,7 @@ string HumanReadableNumBytes(int64 num_bytes) { // No fractions for bytes. 
char buf[8]; // Longest possible string is '-XXXXB' snprintf(buf, sizeof(buf), "%s%lldB", neg_str, - static_cast<int64>(num_bytes)); + static_cast<long long>(num_bytes)); return string(buf); } diff --git a/tensorflow/core/util/debug_events_writer_test.cc b/tensorflow/core/util/debug_events_writer_test.cc index 925fec7add1..66cde55864b 100644 --- a/tensorflow/core/util/debug_events_writer_test.cc +++ b/tensorflow/core/util/debug_events_writer_test.cc @@ -68,8 +68,9 @@ class DebugEventsWriterTest : public ::testing::Test { } void SetUp() override { - dump_root_ = io::JoinPath(testing::TmpDir(), - strings::Printf("%010lld", env()->NowMicros())); + dump_root_ = io::JoinPath( + testing::TmpDir(), + strings::Printf("%010lld", static_cast<long long>(env()->NowMicros()))); } void TearDown() override { diff --git a/tensorflow/core/util/events_writer.cc b/tensorflow/core/util/events_writer.cc index 4585b98c705..482812eb5cc 100644 --- a/tensorflow/core/util/events_writer.cc +++ b/tensorflow/core/util/events_writer.cc @@ -66,7 +66,7 @@ Status EventsWriter::InitIfNeeded() { filename_ = strings::Printf("%s.out.tfevents.%010lld.%s%s", file_prefix_.c_str(), - static_cast<int64>(time_in_seconds), + static_cast<long long>(time_in_seconds), port::Hostname().c_str(), file_suffix_.c_str()); // Reset recordio_writer (which has a reference to recordio_file_) so final diff --git a/tensorflow/python/client/session_ref.cc b/tensorflow/python/client/session_ref.cc index db4426ec05f..d911e185153 100644 --- a/tensorflow/python/client/session_ref.cc +++ b/tensorflow/python/client/session_ref.cc @@ -53,7 +53,8 @@ struct RunCounter { }; std::string SessionToHandle(Session* session) { - return strings::Printf("%llu", reinterpret_cast<std::uintptr_t>(session)); + return strings::Printf("%llu", static_cast<unsigned long long>( + reinterpret_cast<uintptr_t>(session))); } // The Session interface has many methods of the form: diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc index 2768777426b..9128f52ee9c 100755 --- 
a/tensorflow/stream_executor/cuda/cuda_dnn.cc +++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc @@ -2620,8 +2620,8 @@ port::StatusOr<cudnnConvolutionFwdAlgo_t> GetCudnnConvolutionForwardAlgorithm( bool specify_workspace_limit = scratch_allocator != nullptr; auto memory_limit_bytes = specify_workspace_limit - ? std::max(scratch_allocator->GetMemoryLimitInBytes(), 0ll) - : 0ll; + ? std::max(scratch_allocator->GetMemoryLimitInBytes(), int64{0}) + : int64{0}; SE_ASSIGN_OR_RETURN(cudnnConvolutionFwdAlgo_t algo, GetCudnnConvolutionForwardAlgo( cudnn, input_nd, filter, conv, output_nd, @@ -2673,8 +2673,8 @@ port::StatusOr<cudnnConvolutionBwdDataAlgo_t> GetCudnnConvolutionBackwardDataAlgorithm( bool specify_workspace_limit = scratch_allocator != nullptr; auto memory_limit_bytes = specify_workspace_limit - ? std::max(scratch_allocator->GetMemoryLimitInBytes(), 0ll) - : 0ll; + ? std::max(scratch_allocator->GetMemoryLimitInBytes(), int64{0}) + : int64{0}; SE_ASSIGN_OR_RETURN(cudnnConvolutionBwdDataAlgo_t algo, GetCudnnConvolutionBackwardDataAlgo( cudnn, input_nd, filter, conv, output_nd, @@ -2725,8 +2725,8 @@ port::StatusOr<cudnnConvolutionBwdFilterAlgo_t> GetCudnnConvolutionBackwardFilterAlgorithm( bool specify_workspace_limit = scratch_allocator != nullptr; auto memory_limit_bytes = specify_workspace_limit - ? std::max(scratch_allocator->GetMemoryLimitInBytes(), 0ll) - : 0ll; + ? std::max(scratch_allocator->GetMemoryLimitInBytes(), int64{0}) + : int64{0}; SE_ASSIGN_OR_RETURN(cudnnConvolutionBwdFilterAlgo_t algo, GetCudnnConvolutionBackwardFilterAlgo( cudnn, input_nd, filter, conv, output_nd,