From 27643b326c990d1ea59d0db25eb19bc31ebc1809 Mon Sep 17 00:00:00 2001
From: Kazuaki Ishizaki
Date: Fri, 20 Dec 2019 06:49:46 +0900
Subject: [PATCH] minor spelling tweaks

---
 tensorflow/c/eager/c_api.cc                   |   2 +-
 tensorflow/c/eager/c_api.h                    |   2 +-
 tensorflow/c/eager/tape.h                     |   2 +-
 .../filesystem/filesystem_interface.h         |   2 +-
 .../filesystem/modular_filesystem.h           |   2 +-
 .../filesystem/modular_filesystem_test.cc     | 226 +++++++--------
 .../plugins/posix/posix_filesystem_helper.cc  |   2 +-
 tensorflow/c/ops_test.cc                      |   4 +-
 tensorflow/c/tf_tensor.cc                     |   2 +-
 tensorflow/cc/framework/gradients.cc          |   6 +-
 tensorflow/cc/gradients/nn_grad.cc            |   2 +-
 .../analysis/side_effect_analysis.cc          |   2 +-
 .../analysis/side_effect_analysis.h           |   2 +-
 .../translate/control_to_executor_dialect.cc  |   4 +-
 .../tf2tensorrt/kernels/trt_engine_op.cc      |   2 +-
 tensorflow/compiler/xla/client/xla_builder.h  |   2 +-
 tensorflow/compiler/xla/layout_util.cc        |   2 +-
 .../xla/service/cpu/cpu_executable.cc         |   2 +-
 .../xla/service/elemental_ir_emitter.cc       |   2 +-
 .../xla/service/gpu/custom_call_test.cc       |   2 +-
 .../xla/service/gpu/gpu_executable.cc         |   2 +-
 .../compiler/xla/service/hlo_verifier.cc      |   2 +-
 .../compiler/xla/service/layout_assignment.h  |   4 +-
 .../android/jni/object_tracking/frame_pair.cc |   2 +-
 .../jni/object_tracking/tracked_object.cc     |   4 +-
 .../jni/object_tracking/tracked_object.h      |   2 +-
 .../speech_commands/recognize_commands.py     |   2 +-
 tensorflow/examples/speech_commands/train.py  |   2 +-
 .../speech_commands/wav_to_features.py        |   4 +-
 tensorflow/go/op/scope.go                     |   4 +-
 tensorflow/go/op/wrappers.go                  |  28 +-
 tensorflow/java/src/gen/cc/op_specs.h         |   2 +-
 .../java/src/gen/cc/source_writer_test.cc     |   4 +-
 tensorflow/python/BUILD                       |   2 +-
 .../python/keras/layers/recurrent_v2.py       |   2 +-
 .../python/keras/saving/hdf5_format_test.py   |   4 +-
 .../python/kernel_tests/scatter_ops_test.py   |   2 +-
 tensorflow/python/module/module_test.py       |   2 +-
 tensorflow/python/ops/metrics_impl.py         |   2 +-
 .../internal/model_analyzer_testlib.py        |   2 +-
 tensorflow/python/profiler/profiler_test.py   |   2 +-
 tensorflow/python/saved_model/utils_test.py   |   2 +-
 tensorflow/python/training/momentum_test.py   |   2 +-
 tensorflow/stream_executor/blas.h             |   2 +-
 tensorflow/stream_executor/cuda/cuda_dnn.cc   |   2 +-
 tensorflow/stream_executor/cuda/cudnn_6_0.inc |   4 +-
 tensorflow/stream_executor/cuda/cudnn_7_0.inc |   4 +-
 tensorflow/stream_executor/cuda/cudnn_7_1.inc |   4 +-
 tensorflow/stream_executor/cuda/cudnn_7_3.inc |   4 +-
 tensorflow/stream_executor/cuda/cudnn_7_4.inc |   4 +-
 tensorflow/stream_executor/cuda/cudnn_7_6.inc |   4 +-
 .../stream_executor/cuda/cusparse_9_0.inc     |   6 +-
 .../stream_executor/device_description.cc     |   2 +-
 tensorflow/stream_executor/device_memory.h    |   2 +-
 tensorflow/stream_executor/dnn.h              |   2 +-
 tensorflow/stream_executor/gpu/gpu_executor.h |   6 +-
 tensorflow/stream_executor/gpu/gpu_timer.h    |   2 +-
 .../stream_executor/multi_platform_manager.h  |   4 +-
 tensorflow/stream_executor/rocm/rocm_blas.cc  | 262 +++++++++---------
 tensorflow/stream_executor/rocm/rocm_blas.h   |   2 +-
 tensorflow/stream_executor/rocm/rocm_dnn.cc   |   8 +-
 tensorflow/stream_executor/rocm/rocm_fft.cc   |  10 +-
 .../stream_executor/scratch_allocator.h       |   2 +-
 .../stream_executor/stream_executor_pimpl.h   |   2 +-
 .../Dockerfile.rbe.cuda10.0-cudnn7-centos6.sh |   2 +-
 .../Dockerfile.rbe.cuda10.1-cudnn7-centos6.sh |   2 +-
 .../tools/ci_build/builds/docker_test.sh      |   2 +-
 tensorflow/tools/ci_build/builds/pip.sh       |   2 +-
 tensorflow/tools/ci_build/builds/pip_new.sh   |   2 +-
 .../tools/ci_build/builds/test_user_ops.sh    |   2 +-
 .../tools/ci_build/linux/cpu/run_mkl.sh       |   2 +-
 .../tools/compatibility/all_renames_v2.py     |   2 +-
 .../tools/compatibility/tf_upgrade_v2.py      |   2 +-
 .../tools/compatibility/tf_upgrade_v2_test.py |   2 +-
 tensorflow/tools/docs/doc_controls.py         |   4 +-
 .../tools/docs/doc_generator_visitor.py       |   2 +-
 tensorflow/tools/docs/parser.py               |   4 +-
 tensorflow/tools/docs/parser_test.py          |   2 +-
 tensorflow/tools/docs/pretty_docs.py          |   2 +-
 .../remove_control_dependencies.cc            |   2 +-
 .../tools/graph_transforms/transform_utils.cc |   2 +-
 .../gen_proto_text_functions_lib_test.cc      |   2 +-
 .../compat_checker/compat_checker.py          |  12 +-
 .../config_detector/config_detector.py        |   2 +-
 .../clang_toolchain/cc_configure_clang.bzl    |   4 +-
 third_party/flatbuffers/build_defs.bzl        |   6 +-
 .../windows/msvc_wrapper_for_nvcc.py.tpl      |   4 +-
 .../windows/msvc_wrapper_for_nvcc.py          |   4 +-
 .../windows/msvc_wrapper_for_nvcc.py          |   4 +-
 .../windows/msvc_wrapper_for_nvcc.py          |   4 +-
 .../windows/msvc_wrapper_for_nvcc.py          |   4 +-
 91 files changed, 389 insertions(+), 389 deletions(-)

diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc
index 29414edf601..5362f9ef0f3 100644
--- a/tensorflow/c/eager/c_api.cc
+++ b/tensorflow/c/eager/c_api.cc
@@ -518,7 +518,7 @@ tensorflow::Status UpdateTFE_ContextWithServerDef(
   grpc_server->worker_env()->device_mgr->ListDeviceAttributes(
       &local_device_attributes);
 
-  // This request make sure that we can create Rendevzous properly between
+  // This request make sure that we can create Rendezvous properly between
   // Local and Remote context.
   tensorflow::eager::CreateContextRequest base_request;
   for (const auto& da : cluster_device_attributes) {
diff --git a/tensorflow/c/eager/c_api.h b/tensorflow/c/eager/c_api.h
index a29755239fd..070b3a9bb60 100644
--- a/tensorflow/c/eager/c_api.h
+++ b/tensorflow/c/eager/c_api.h
@@ -213,7 +213,7 @@ TF_CAPI_EXPORT extern void TFE_DeleteTensorDebugInfo(
     TFE_TensorDebugInfo* debug_info);
 
 // Returns the number of dimensions used to represent the tensor on its device.
-// The number of dimensions used to reprensent the tensor on device can be
+// The number of dimensions used to represent the tensor on device can be
 // different from the number returned by TFE_TensorHandleNumDims.
 // The return value was current at the time of TFE_TensorDebugInfo creation.
 TF_CAPI_EXPORT extern int TFE_TensorDebugInfoOnDeviceNumDims(
diff --git a/tensorflow/c/eager/tape.h b/tensorflow/c/eager/tape.h
index 5c799f778fe..47c42b38e96 100644
--- a/tensorflow/c/eager/tape.h
+++ b/tensorflow/c/eager/tape.h
@@ -284,7 +284,7 @@ class ForwardAccumulator {
   // Temporarily push or pop transient state for this accumulator.
   //
   // Allows an accumulator which is currently processing an operation to
-  // temporarily reset its state. Without pushing and poping, accumulators
+  // temporarily reset its state. Without pushing and popping, accumulators
   // ignore operations executed as a direct result of their own jvp
   // computations.
   void PushState() { call_state_.emplace(nullptr, false); }
diff --git a/tensorflow/c/experimental/filesystem/filesystem_interface.h b/tensorflow/c/experimental/filesystem/filesystem_interface.h
index bdd170d1310..60195f88856 100644
--- a/tensorflow/c/experimental/filesystem/filesystem_interface.h
+++ b/tensorflow/c/experimental/filesystem/filesystem_interface.h
@@ -529,7 +529,7 @@ typedef struct TF_FilesystemOps {
   /// If `statuses` is not null, plugins must fill each element with detailed
   /// status for each file, as if calling `path_exists` on each one. Core
   /// TensorFlow initializes the `statuses` array and plugins must use
-  /// `TF_SetStatus` to set each element instead of dirrectly assigning.
+  /// `TF_SetStatus` to set each element instead of directly assigning.
   ///
   /// DEFAULT IMPLEMENTATION: Checks existence of every file. Needs
   /// `path_exists`.
diff --git a/tensorflow/c/experimental/filesystem/modular_filesystem.h b/tensorflow/c/experimental/filesystem/modular_filesystem.h
index 386592d1c6b..19a631ffc5d 100644
--- a/tensorflow/c/experimental/filesystem/modular_filesystem.h
+++ b/tensorflow/c/experimental/filesystem/modular_filesystem.h
@@ -32,7 +32,7 @@ namespace tensorflow {
 // TODO(b/143949615): After all filesystems are converted, this file will be
 // moved to core/platform, and this class can become a singleton and replace the
 // need for `Env::Default()`. At that time, we might decide to remove the need
-// for `Env::Default()` altoghether, but that's a different project, not in
+// for `Env::Default()` altogether, but that's a different project, not in
 // scope for now. I'm just mentioning this here as that transition will mean
 // removal of the registration part from `Env` and adding it here instead: we
 // will need tables to hold for each scheme the function tables that implement
diff --git a/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc b/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc
index ff1d63934da..a89f7ee4fbe 100644
--- a/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc
+++ b/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc
@@ -146,7 +146,7 @@ int ModularFileSystemTest::rng_val_;
 // As some of the implementations might be missing, the tests should still pass
 // if the returned `Status` signals the unimplemented state.
-bool UninmplementedOrReturnsCode(Status actual_status, Code expected_code) {
+bool UnimplementedOrReturnsCode(Status actual_status, Code expected_code) {
   Code actual_code = actual_status.code();
   return (actual_code == Code::UNIMPLEMENTED) ||
          (actual_code == expected_code);
 }
@@ -193,14 +193,14 @@ TEST_P(ModularFileSystemTest, TestCreateFile) {
   const std::string filepath = GetURIForPath("a_file");
   std::unique_ptr<WritableFile> new_file;
   Status status = env_->NewWritableFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateFileNonExisting) {
   const std::string filepath = GetURIForPath("dir_not_found/a_file");
   std::unique_ptr<WritableFile> new_file;
   Status status = env_->NewWritableFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateFileExistingDir) {
@@ -210,7 +210,7 @@ TEST_P(ModularFileSystemTest, TestCreateFileExistingDir) {
 
   std::unique_ptr<WritableFile> new_file;
   status = env_->NewWritableFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateFilePathIsInvalid) {
@@ -222,21 +222,21 @@ TEST_P(ModularFileSystemTest, TestCreateFilePathIsInvalid) {
   const std::string new_path = GetURIForPath("a_file/a_file");
   std::unique_ptr<WritableFile> new_file;
   status = env_->NewWritableFile(new_path, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestAppendFile) {
   const std::string filepath = GetURIForPath("a_file");
   std::unique_ptr<WritableFile> new_file;
   Status status = env_->NewAppendableFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestAppendFileNonExisting) {
   const std::string filepath = GetURIForPath("dir_not_found/a_file");
   std::unique_ptr<WritableFile> new_file;
   Status status = env_->NewAppendableFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestAppendFileExistingDir) {
@@ -246,7 +246,7 @@ TEST_P(ModularFileSystemTest, TestAppendFileExistingDir) {
 
   std::unique_ptr<WritableFile> new_file;
   status = env_->NewAppendableFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateThenAppendFile) {
@@ -258,7 +258,7 @@ TEST_P(ModularFileSystemTest, TestCreateThenAppendFile) {
 
   std::unique_ptr<WritableFile> same_file;
   status = env_->NewAppendableFile(filepath, &same_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestAppendFilePathIsInvalid) {
@@ -271,21 +271,21 @@ TEST_P(ModularFileSystemTest, TestAppendFilePathIsInvalid) {
   const std::string new_path = GetURIForPath("a_file/a_file");
   std::unique_ptr<WritableFile> same_file;
   status = env_->NewAppendableFile(new_path, &same_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestReadFile) {
   const std::string filepath = GetURIForPath("a_file");
   std::unique_ptr<RandomAccessFile> new_file;
   Status status = env_->NewRandomAccessFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestReadFileNonExisting) {
   const std::string filepath = GetURIForPath("dir_not_found/a_file");
   std::unique_ptr<RandomAccessFile> new_file;
   Status status = env_->NewRandomAccessFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestReadFileExistingDir) {
@@ -295,7 +295,7 @@ TEST_P(ModularFileSystemTest, TestReadFileExistingDir) {
 
   std::unique_ptr<RandomAccessFile> new_file;
   status = env_->NewRandomAccessFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateThenReadFile) {
@@ -307,7 +307,7 @@ TEST_P(ModularFileSystemTest, TestCreateThenReadFile) {
 
   std::unique_ptr<RandomAccessFile> same_file;
   status = env_->NewRandomAccessFile(filepath, &same_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestReadFilePathIsInvalid) {
@@ -320,21 +320,21 @@ TEST_P(ModularFileSystemTest, TestReadFilePathIsInvalid) {
   const std::string new_path = GetURIForPath("a_file/a_file");
   std::unique_ptr<RandomAccessFile> same_file;
   status = env_->NewRandomAccessFile(new_path, &same_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateMemoryRegion) {
   const std::string filepath = GetURIForPath("a_file");
   std::unique_ptr<ReadOnlyMemoryRegion> region;
   Status status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateMemoryRegionNonExisting) {
   const std::string filepath = GetURIForPath("dir_not_found/a_file");
   std::unique_ptr<ReadOnlyMemoryRegion> region;
   Status status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateMemoryRegionExistingDir) {
@@ -344,7 +344,7 @@ TEST_P(ModularFileSystemTest, TestCreateMemoryRegionExistingDir) {
 
   std::unique_ptr<ReadOnlyMemoryRegion> new_file;
   status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &new_file);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromEmptyFile) {
@@ -356,7 +356,7 @@ TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromEmptyFile) {
 
   std::unique_ptr<ReadOnlyMemoryRegion> region;
   status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::INVALID_ARGUMENT);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::INVALID_ARGUMENT);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromFile) {
@@ -376,7 +376,7 @@ TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromFile) {
 
   std::unique_ptr<ReadOnlyMemoryRegion> region;
   status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok())
     GTEST_SKIP() << "NewReadOnlyMemoryRegionFromFile() not supported: "
                  << status;
@@ -395,19 +395,19 @@ TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromFilePathIsInvalid) {
   std::string new_path = GetURIForPath("a_file/a_file");
   std::unique_ptr<ReadOnlyMemoryRegion> region;
   status = env_->NewReadOnlyMemoryRegionFromFile(new_path, &region);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateDir) {
   const std::string dirpath = GetURIForPath("a_dir");
   Status status = env_->CreateDir(dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateDirNoParent) {
   const std::string dirpath = GetURIForPath("dir_not_found/a_dir");
   Status status = env_->CreateDir(dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateDirWhichIsFile) {
@@ -418,7 +418,7 @@ TEST_P(ModularFileSystemTest, TestCreateDirWhichIsFile) {
     GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = env_->CreateDir(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateDirTwice) {
@@ -427,7 +427,7 @@ TEST_P(ModularFileSystemTest, TestCreateDirTwice) {
   if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
 
   status = env_->CreateDir(dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
 }
 
 TEST_P(ModularFileSystemTest, TestCreateDirPathIsInvalid) {
@@ -439,7 +439,7 @@ TEST_P(ModularFileSystemTest, TestCreateDirPathIsInvalid) {
 
   const std::string new_path = GetURIForPath("a_file/a_dir");
   status = env_->CreateDir(new_path);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestRecursivelyCreateDir) {
@@ -528,7 +528,7 @@ TEST_P(ModularFileSystemTest, TestDeleteFile) {
     GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = env_->DeleteFile(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteFileFromDirectory) {
@@ -543,13 +543,13 @@ TEST_P(ModularFileSystemTest, TestDeleteFileFromDirectory) {
     GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = env_->DeleteFile(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteFileDoesNotExist) {
   const std::string filepath = GetURIForPath("a_file");
   Status status = env_->DeleteFile(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteFileWhichIsDirectory) {
@@ -558,7 +558,7 @@ TEST_P(ModularFileSystemTest, TestDeleteFileWhichIsDirectory) {
   if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
 
   status = env_->DeleteFile(dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteFilePathIsInvalid) {
@@ -570,7 +570,7 @@ TEST_P(ModularFileSystemTest, TestDeleteFilePathIsInvalid) {
 
   const std::string new_path = GetURIForPath("a_file/a_new_file");
   status = env_->DeleteFile(new_path);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteDirectory) {
@@ -579,7 +579,7 @@ TEST_P(ModularFileSystemTest, TestDeleteDirectory) {
   if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
 
   status = env_->DeleteDir(dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteDirectoryFromDirectory) {
@@ -591,13 +591,13 @@ TEST_P(ModularFileSystemTest, TestDeleteDirectoryFromDirectory) {
   EXPECT_EQ(env_->CreateDir(target_path).code(), Code::OK);
 
   status = env_->DeleteDir(target_path);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteDirectoryDoesNotExist) {
   const std::string dirpath = GetURIForPath("a_dir");
   Status status = env_->DeleteDir(dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteDirectoryNotEmpty) {
@@ -612,7 +612,7 @@ TEST_P(ModularFileSystemTest, TestDeleteDirectoryNotEmpty) {
     GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = env_->DeleteDir(dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteDirectoryWhichIsFile) {
@@ -623,7 +623,7 @@ TEST_P(ModularFileSystemTest, TestDeleteDirectoryWhichIsFile) {
    GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = env_->DeleteDir(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteDirectoryPathIsInvalid) {
@@ -635,7 +635,7 @@ TEST_P(ModularFileSystemTest, TestDeleteDirectoryPathIsInvalid) {
 
   const std::string new_path = GetURIForPath("a_file/a_dir");
   status = env_->DeleteDir(new_path);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestDeleteRecursivelyEmpty) {
@@ -774,13 +774,13 @@ TEST_P(ModularFileSystemTest, TestRenameFile) {
 
   const std::string new_filepath = GetURIForPath("a_new_file");
   status = env_->RenameFile(filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
 
   status = env_->FileExists(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
   status = env_->FileExists(new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestRenameFileOverwrite) {
@@ -797,20 +797,20 @@ TEST_P(ModularFileSystemTest, TestRenameFileOverwrite) {
     GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = env_->RenameFile(filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
 
   status = env_->FileExists(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
   status = env_->FileExists(new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestRenameFileSourceNotFound) {
   const std::string filepath = GetURIForPath("a_file");
   const std::string new_filepath = GetURIForPath("a_new_file");
   Status status = env_->RenameFile(filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestRenameFileDestinationParentNotFound) {
@@ -822,7 +822,7 @@ TEST_P(ModularFileSystemTest, TestRenameFileDestinationParentNotFound) {
 
   const std::string new_filepath = GetURIForPath("a_dir/a_file");
   status = env_->RenameFile(filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestRenameFileSourceIsDirectory) {
@@ -832,7 +832,7 @@ TEST_P(ModularFileSystemTest, TestRenameFileSourceIsDirectory) {
 
   const std::string new_filepath = GetURIForPath("a_new_file");
   status = env_->RenameFile(dirpath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestRenameFileTargetIsDirectory) {
@@ -847,7 +847,7 @@ TEST_P(ModularFileSystemTest, TestRenameFileTargetIsDirectory) {
   if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
 
   status = env_->RenameFile(filepath, dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestRenameFileSourcePathIsInvalid) {
@@ -860,7 +860,7 @@ TEST_P(ModularFileSystemTest, TestRenameFileSourcePathIsInvalid) {
   const std::string old_filepath = GetURIForPath("a_file/x");
   const std::string new_filepath = GetURIForPath("a_new_file");
   status = env_->RenameFile(old_filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestRenameFileTargetPathIsInvalid) {
@@ -878,7 +878,7 @@ TEST_P(ModularFileSystemTest, TestRenameFileTargetPathIsInvalid) {
 
   const std::string new_filepath = GetURIForPath("a_file/a_new_file");
   status = env_->RenameFile(old_filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestRenameFileCompareContents) {
@@ -898,12 +898,12 @@ TEST_P(ModularFileSystemTest, TestRenameFileCompareContents) {
 
   const std::string new_filepath = GetURIForPath("a_new_file");
   status = env_->RenameFile(filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
 
   uint64 size;
   status = env_->GetFileSize(new_filepath, &size);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
   EXPECT_EQ(size, test_data.size());
 }
@@ -917,13 +917,13 @@ TEST_P(ModularFileSystemTest, TestCopyFile) {
 
   const std::string new_filepath = GetURIForPath("a_new_file");
   status = env_->CopyFile(filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "CopyFile() not supported: " << status;
 
   status = env_->FileExists(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   status = env_->FileExists(new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestCopyFileOverwrite) {
@@ -940,20 +940,20 @@ TEST_P(ModularFileSystemTest, TestCopyFileOverwrite) {
    GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = env_->CopyFile(filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "CopyFile() not supported: " << status;
 
   status = env_->FileExists(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   status = env_->FileExists(new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestCopyFileSourceNotFound) {
   const std::string filepath = GetURIForPath("a_file");
   const std::string new_filepath = GetURIForPath("a_new_file");
   Status status = env_->CopyFile(filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestCopyFileSourceIsDirectory) {
@@ -963,7 +963,7 @@ TEST_P(ModularFileSystemTest, TestCopyFileSourceIsDirectory) {
 
   const std::string new_filepath = GetURIForPath("a_new_file");
   status = env_->CopyFile(dirpath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCopyFileTargetIsDirectory) {
@@ -978,7 +978,7 @@ TEST_P(ModularFileSystemTest, TestCopyFileTargetIsDirectory) {
   if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
 
   status = env_->CopyFile(filepath, dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCopyFileSourcePathIsInvalid) {
@@ -991,7 +991,7 @@ TEST_P(ModularFileSystemTest, TestCopyFileSourcePathIsInvalid) {
   const std::string old_filepath = GetURIForPath("a_file/x");
   const std::string new_filepath = GetURIForPath("a_new_file");
   status = env_->CopyFile(old_filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCopyFileTargetPathIsInvalid) {
@@ -1009,7 +1009,7 @@ TEST_P(ModularFileSystemTest, TestCopyFileTargetPathIsInvalid) {
 
   const std::string new_filepath = GetURIForPath("a_file/a_new_file");
   status = env_->CopyFile(old_filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestCopyFileCompareContents) {
@@ -1029,17 +1029,17 @@ TEST_P(ModularFileSystemTest, TestCopyFileCompareContents) {
 
   const std::string new_filepath = GetURIForPath("a_new_file");
   status = env_->CopyFile(filepath, new_filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
 
   uint64 size;
   status = env_->GetFileSize(filepath, &size);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
   EXPECT_EQ(size, test_data.size());
 
   status = env_->GetFileSize(new_filepath, &size);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
   EXPECT_EQ(size, test_data.size());
 }
@@ -1052,7 +1052,7 @@ TEST_P(ModularFileSystemTest, TestFileExists) {
     GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = env_->FileExists(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestFileExistsButIsDirectory) {
@@ -1061,13 +1061,13 @@ TEST_P(ModularFileSystemTest, TestFileExistsButIsDirectory) {
   if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
 
   status = env_->FileExists(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestFileExistsNotFound) {
   const std::string filepath = GetURIForPath("a_file");
   Status status = env_->FileExists(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestFileExistsPathIsInvalid) {
@@ -1079,7 +1079,7 @@ TEST_P(ModularFileSystemTest, TestFileExistsPathIsInvalid) {
 
   const std::string target_path = GetURIForPath("a_file/a_new_file");
   status = env_->FileExists(target_path);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestFilesExist) {
@@ -1098,7 +1098,7 @@
   EXPECT_TRUE(env_->FilesExist(filenames, &statuses));
   EXPECT_EQ(statuses.size(), filenames.size());
   for (const auto& status : statuses)
-    EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+    EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestFilesExistAllFailureModes) {
@@ -1121,11 +1121,11 @@
   std::vector<Status> statuses;
   EXPECT_FALSE(env_->FilesExist(filenames, &statuses));
   EXPECT_EQ(statuses.size(), filenames.size());
-  EXPECT_PRED2(UninmplementedOrReturnsCode, statuses[0], Code::OK);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, statuses[1], Code::OK);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, statuses[2],
+  EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[0], Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[1], Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[2],
                Code::FAILED_PRECONDITION);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, statuses[3], Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[3], Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestFilesExistsNoFiles) {
@@ -1146,7 +1146,7 @@ TEST_P(ModularFileSystemTest, TestStatEmptyFile) {
 
   FileStatistics stat;
   status = env_->Stat(filepath, &stat);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Stat() not supported: " << status;
   EXPECT_FALSE(stat.is_directory);
   EXPECT_EQ(stat.length, 0);
@@ -1169,7 +1169,7 @@ TEST_P(ModularFileSystemTest, TestStatNonEmptyFile) {
 
   FileStatistics stat;
   status = env_->Stat(filepath, &stat);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Stat() not supported: " << status;
   EXPECT_FALSE(stat.is_directory);
   EXPECT_EQ(stat.length, test_data.size());
@@ -1182,7 +1182,7 @@ TEST_P(ModularFileSystemTest, TestStatDirectory) {
 
   FileStatistics stat;
   status = env_->Stat(dirpath, &stat);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Stat() not supported: " << status;
   EXPECT_TRUE(stat.is_directory);
 }
@@ -1191,7 +1191,7 @@ TEST_P(ModularFileSystemTest, TestStatNotFound) {
   const std::string dirpath = GetURIForPath("a_dir");
   FileStatistics stat;
   Status status = env_->Stat(dirpath, &stat);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestStatPathIsInvalid) {
@@ -1204,7 +1204,7 @@ TEST_P(ModularFileSystemTest, TestStatPathIsInvalid) {
   const std::string target_path = GetURIForPath("a_file/a_new_file");
   FileStatistics stat;
   status = env_->Stat(target_path, &stat);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestIsDirectory) {
@@ -1213,7 +1213,7 @@ TEST_P(ModularFileSystemTest, TestIsDirectory) {
   if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
 
   status = env_->IsDirectory(dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
 }
 
 TEST_P(ModularFileSystemTest, TestIsDirectoryFile) {
@@ -1224,13 +1224,13 @@ TEST_P(ModularFileSystemTest, TestIsDirectoryFile) {
     GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = env_->IsDirectory(filepath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestIsDirectoryNotFound) {
   const std::string dirpath = GetURIForPath("a_dir");
   Status status = env_->IsDirectory(dirpath);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestIsDirectoryPathIsInvalid) {
@@ -1242,7 +1242,7 @@ TEST_P(ModularFileSystemTest, TestIsDirectoryPathIsInvalid) {
 
   const std::string target_path = GetURIForPath("a_file/a_new_file");
   status = env_->IsDirectory(target_path);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestGetFileSizeEmptyFile) {
@@ -1254,7 +1254,7 @@ TEST_P(ModularFileSystemTest, TestGetFileSizeEmptyFile) {
 
   uint64 size;
   status = env_->GetFileSize(filepath, &size);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
   EXPECT_EQ(size, 0);
 }
@@ -1276,7 +1276,7 @@ TEST_P(ModularFileSystemTest, TestGetFileSizeNonEmptyFile) {
 
   uint64 size;
   status = env_->GetFileSize(filepath, &size);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
   EXPECT_EQ(size, test_data.size());
 }
@@ -1288,14 +1288,14 @@ TEST_P(ModularFileSystemTest, TestGetFileSizeDirectory) {
 
   uint64 size;
   status = env_->GetFileSize(dirpath, &size);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestGetFileSizeNotFound) {
   const std::string filepath = GetURIForPath("a_dir");
   uint64 size;
   Status status = env_->GetFileSize(filepath, &size);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestGetFileSizePathIsInvalid) {
@@ -1308,7 +1308,7 @@ TEST_P(ModularFileSystemTest, TestGetFileSizePathIsInvalid) {
   const std::string target_path = GetURIForPath("a_file/a_new_file");
   uint64 size;
   status = env_->GetFileSize(target_path, &size);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestGetChildren) {
@@ -1340,7 +1340,7 @@ TEST_P(ModularFileSystemTest, TestGetChildren) {
 
   std::vector<std::string> children;
   status = env_->GetChildren(dirpath, &children);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "GetChildren() not supported: " << status;
 
   // All entries must show up in the vector.
@@ -1360,7 +1360,7 @@ TEST_P(ModularFileSystemTest, TestGetChildrenEmpty) {
 
   std::vector<std::string> children;
   status = env_->GetChildren(dirpath, &children);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   EXPECT_EQ(children.size(), 0);
 }
 
@@ -1373,14 +1373,14 @@ TEST_P(ModularFileSystemTest, TestGetChildrenOfFile) {
 
   std::vector<std::string> children;
   status = env_->GetChildren(filepath, &children);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestGetChildrenPathNotFound) {
   const std::string target_path = GetURIForPath("a_dir");
   std::vector<std::string> children;
   Status status = env_->GetChildren(target_path, &children);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::NOT_FOUND);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
 }
 
 TEST_P(ModularFileSystemTest, TestGetChildrenPathIsInvalid) {
@@ -1393,7 +1393,7 @@ TEST_P(ModularFileSystemTest, TestGetChildrenPathIsInvalid) {
   const std::string target_path = GetURIForPath("a_file/a_new_dir");
   std::vector<std::string> children;
   status = env_->GetChildren(target_path, &children);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
 }
 
 TEST_P(ModularFileSystemTest, TestGetMatchingPaths) {
@@ -1422,7 +1422,7 @@ TEST_P(ModularFileSystemTest, TestGetMatchingPaths) {
 
   std::vector<std::string> results;
   Status status = env_->GetMatchingPaths(GetURIForPath("/a*"), &results);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok())
     GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
   EXPECT_EQ(results.size(), matching_filenames.size());
@@ -1433,7 +1433,7 @@ TEST_P(ModularFileSystemTest, TestGetMatchingPathsEmptyFileSystem) {
   std::vector<std::string> results;
   Status status = env_->GetMatchingPaths(GetURIForPath("a*"), &results);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   EXPECT_EQ(results.size(), 0);
 }
 
@@ -1454,7 +1454,7 @@ TEST_P(ModularFileSystemTest, TestGetMatchingPathsEmptyPattern) {
 
   std::vector<std::string> results;
   Status status = env_->GetMatchingPaths(GetURIForPath(""), &results);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok())
     GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
   EXPECT_EQ(results.size(), 1);
@@ -1479,7 +1479,7 @@ TEST_P(ModularFileSystemTest, TestGetMatchingPathsLiteralMatch) {
 
   std::vector<std::string> results;
   Status status = env_->GetMatchingPaths(filenames[0], &results);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok())
     GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
   EXPECT_EQ(results.size(), 1);
@@ -1506,7 +1506,7 @@ TEST_P(ModularFileSystemTest, TestGetMatchingPathsNoMatch) {
   Status status = env_->GetMatchingPaths(GetURIForPath("x?y*"), &results);
   if (!status.ok())
     GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   EXPECT_EQ(results.size(), 0);
 }
 
@@ -1519,13 +1519,13 @@ TEST_P(ModularFileSystemTest, TestAppendAndTell) {
 
   int64 position;
   status = file->Tell(&position);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Tell() not supported: " << status;
   EXPECT_EQ(position, 0);
 
   const std::string test_data("asdf");
   status = file->Append(test_data);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
 
   status = file->Tell(&position);
@@ -1541,7 +1541,7 @@ TEST_P(ModularFileSystemTest, TestClose) {
     GTEST_SKIP() << "NewWritableFile() not supported: " << status;
 
   status = file->Close();
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
 }
 
@@ -1554,15 +1554,15 @@ TEST_P(ModularFileSystemTest, TestRoundTrip) {
 
   const std::string test_data("asdf");
   status = file->Append(test_data);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
 
   status = file->Flush();
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
 
   status = file->Close();
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
 
   std::unique_ptr<RandomAccessFile> read_file;
@@ -1573,7 +1573,7 @@ TEST_P(ModularFileSystemTest, TestRoundTrip) {
   char scratch[64 /* big enough to accomodate test_data */] = {0};
   StringPiece result;
   status = read_file->Read(0, test_data.size(), &result, scratch);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   EXPECT_EQ(test_data, result);
 }
 
@@ -1586,15 +1586,15 @@ TEST_P(ModularFileSystemTest, TestRoundTripWithAppendableFile) {
 
   const std::string test_data("asdf");
   status = file->Append(test_data);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
 
   status = file->Flush();
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
 
   status = file->Close();
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
 
   std::unique_ptr<WritableFile> same_file;
@@ -1616,7 +1616,7 @@ TEST_P(ModularFileSystemTest, TestRoundTripWithAppendableFile) {
   StringPiece result;
   status = read_file->Read(0, test_data.size() + more_test_data.size(),
                            &result, scratch);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   EXPECT_EQ(test_data + more_test_data, result);
   EXPECT_EQ(
       read_file->Read(test_data.size(), more_test_data.size(), &result, scratch)
@@ -1634,15 +1634,15 @@ TEST_P(ModularFileSystemTest, TestReadOutOfRange) {
 
   const std::string test_data("asdf");
   status = file->Append(test_data);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
 
   status = file->Flush();
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
 
   status = file->Close();
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OK);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
   if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
 
   std::unique_ptr<RandomAccessFile> read_file;
@@ -1654,7 +1654,7 @@ TEST_P(ModularFileSystemTest, TestReadOutOfRange) {
   StringPiece result;
   // read at least 1 byte more than test_data
   status = read_file->Read(0, test_data.size() + 1, &result, scratch);
-  EXPECT_PRED2(UninmplementedOrReturnsCode, status, Code::OUT_OF_RANGE);
+  EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OUT_OF_RANGE);
 }
 
 // The URI schemes that need to be tested are provided by the user via flags
diff --git a/tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem_helper.cc b/tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem_helper.cc
index 13fb38c3276..2cdcf74d427 100644
--- a/tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem_helper.cc
+++ b/tensorflow/c/experimental/filesystem/plugins/posix/posix_filesystem_helper.cc
@@ -44,7 +44,7 @@ int TransferFileContents(const char* src, const char* dst, mode_t mode,
   }
 
   // Both files have been opened, do the transfer.
-  // Since errno would be overriden by `close` below, save it here.
+  // Since errno would be overridden by `close` below, save it here.
   int error_code = 0;
   if (CopyFileContents(dst_fd, src_fd, size) < 0) error_code = errno;
 
diff --git a/tensorflow/c/ops_test.cc b/tensorflow/c/ops_test.cc
index 2e0a8e92b01..482413f966c 100644
--- a/tensorflow/c/ops_test.cc
+++ b/tensorflow/c/ops_test.cc
@@ -133,7 +133,7 @@ TEST(OpsTest, TestShapeInference_VectorizeFunction) {
 
 TEST(OpsTest, AttributeAccessors) {
   TF_OpDefinitionBuilder* builder =
-      TF_NewOpDefinitionBuilder("AttributeAccesorsOp");
+      TF_NewOpDefinitionBuilder("AttributeAccessorsOp");
   TF_OpDefinitionBuilderAddAttr(builder, "foo1: int >= 2");
   TF_OpDefinitionBuilderAddAttr(builder, "foo2: string=\"my string\"");
   TF_OpDefinitionBuilderSetIsCommutative(builder, true);
@@ -151,7 +151,7 @@ TEST(OpsTest, AttributeAccessors) {
   op_list.ParseFromArray(op_list_buffer->data, op_list_buffer->length);
   bool found = false;
   for (const auto& op : op_list.op()) {
-    if (op.name() == "AttributeAccesorsOp") {
+    if (op.name() == "AttributeAccessorsOp") {
       ASSERT_TRUE(op.is_commutative());
       ASSERT_TRUE(op.is_aggregate());
       ASSERT_TRUE(op.allows_uninitialized_input());
diff --git a/tensorflow/c/tf_tensor.cc b/tensorflow/c/tf_tensor.cc
index cf88e1a403f..6bb2cafbbc5 100644
--- a/tensorflow/c/tf_tensor.cc
+++ b/tensorflow/c/tf_tensor.cc
@@ -383,7 +383,7 @@ Status TensorInterface::ToTensor(Tensor* dst) const {
     if (!dst->scalar<ResourceHandle>()().ParseFromString(
             string(static_cast<const char*>(Data()), ByteSize()))) {
       return InvalidArgument(
-          "Malformed TF_RESOUCE tensor: unable to parse resource handle");
+          "Malformed TF_RESOURCE tensor: unable to parse resource handle");
     }
     return Status::OK();
   }
diff --git a/tensorflow/cc/framework/gradients.cc b/tensorflow/cc/framework/gradients.cc
index 303fdf64ec7..5a00de6a666 100644
--- a/tensorflow/cc/framework/gradients.cc
+++ b/tensorflow/cc/framework/gradients.cc
@@ -346,8 +346,8 @@ Status SymbolicGradientBuilder::SumGradients(const Output& src, Output* grad) {
         "Unable to find backprop list for node.id ", src.node()->name());
   }
   const auto& grads = iter->second;
-  // Filter any backproped 'NoGradient' Outputs from 'grads' (if needed).
-  // Return any valid backproped gradients that remain after filtering,
+  // Filter any backpropped 'NoGradient' Outputs from 'grads' (if needed).
+  // Return any valid backpropped gradients that remain after filtering,
   // or 'NoGradient' otherwise.
   std::vector<Output> grads_to_keep;
   for (const Output& o : grads) {
@@ -519,7 +519,7 @@ Status SymbolicGradientBuilder::AddGradients() {
     // Backprop along the in edges.
     // TODO(andydavis) Find cleaner way to map each grad output returned by
     // gradient function to the src node/output to which it should be
-    // backproped. Maybe grad functions can return a vector of Output pairs to
+    // backpropped. Maybe grad functions can return a vector of Output pairs to
    // make this association explicit.
     size_t dx_index = 0;
     for (const Edge* e : n->in_edges()) {
diff --git a/tensorflow/cc/gradients/nn_grad.cc b/tensorflow/cc/gradients/nn_grad.cc
index 2a32a2ed6f7..d329b999a5c 100644
--- a/tensorflow/cc/gradients/nn_grad.cc
+++ b/tensorflow/cc/gradients/nn_grad.cc
@@ -64,7 +64,7 @@ bool IsZero(const Scope& scope, const Output& grad) {
 // Multiply after broadcasting vec to match dimensions of mat.
 //   Args:
 //     vec: A 1-D tensor of dimension [D0]
-//     mat: A 2-D tensor of dimesnion [D0, D1]
+//     mat: A 2-D tensor of dimension [D0, D1]
 //
 //   Returns:
 //     A tensor of dimension [D0, D1], the result fo vec * mat.
diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc
index 785f8e7f966..1b11a7c9a5c 100644
--- a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc
+++ b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc
@@ -413,7 +413,7 @@ void SideEffectAnalysis::AnalyzeRegion(
 
   // Returns whether an access to `resource` can skip control edges from
   // previous accesses to unknown resources, due to that earlier accesses to
-  // `resource` already indirectly tracked previous accesses to uknown
+  // `resource` already indirectly tracked previous accesses to unknown
   // resources. `read_only` specifies the type of access of the current op being
   // considered.
   auto unknown_access_indirectly_tracked_by_resource = [&](int64_t resource,
diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h
index 9457a3e8c6d..9d7a5ce2233 100644
--- a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h
+++ b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h
@@ -62,7 +62,7 @@ class ResourceAliasAnalysis {
 
 // An analysis that runs on a function and infers the control predecessors and
 // successors for each op, based on side-effects on known and unknown resources.
-// Side-effecting ops on uknown resources are conservatively treated as
+// Side-effecting ops on unknown resources are conservatively treated as
 // interfering with all known resource op accesses. It distinguishes accesses
 // based on whether they are read-only, and read-only ops do not interfer with
 // each other.
diff --git a/tensorflow/compiler/mlir/tensorflow/translate/control_to_executor_dialect.cc b/tensorflow/compiler/mlir/tensorflow/translate/control_to_executor_dialect.cc
index 696891289ca..672ba418489 100644
--- a/tensorflow/compiler/mlir/tensorflow/translate/control_to_executor_dialect.cc
+++ b/tensorflow/compiler/mlir/tensorflow/translate/control_to_executor_dialect.cc
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-// This transformation pass transforms MLIR TF contol dialect into a combination
-// of the TF and TF executor dialects.
+// This transformation pass transforms MLIR TF control dialect into a
+// combination of the TF and TF executor dialects.
 //
 // !! This code is only intended for migration purpose and will be deleted when
 // !! the importer is updated to directly emit the tf_executor dialect.
diff --git a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op.cc b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op.cc
index e0377c2b1dc..9fbe9bc250a 100644
--- a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op.cc
+++ b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op.cc
@@ -617,7 +617,7 @@ bool TRTEngineOp::ExecuteTrtEngine(OpKernelContext* ctx,
     }
   } else {
     const string msg =
-        StrCat("Ouput node ", output_name, " not found, at ", name());
+        StrCat("Output node ", output_name, " not found, at ", name());
     LOG(ERROR) << msg;
     ctx->SetStatus(errors::NotFound(msg));
     return !kRetry;
diff --git a/tensorflow/compiler/xla/client/xla_builder.h b/tensorflow/compiler/xla/client/xla_builder.h
index 42126306996..6deda2179c3 100644
--- a/tensorflow/compiler/xla/client/xla_builder.h
+++ b/tensorflow/compiler/xla/client/xla_builder.h
@@ -329,7 +329,7 @@ class XlaBuilder {
                             int64 target_param_num,
                             ShapeIndex target_param_index,
                             int64 target_dim_num);
 
-  // Adds a new input/output alias. Since the input/ouput shape information are
+  // Adds a new input/output alias. Since the input/output shape information are
   // not available until the computation is built, and eventual error in the
   // arguments of this API will be detected only at computation Build() time.
   void SetUpAlias(const ShapeIndex& output_index, int64 param_number,
diff --git a/tensorflow/compiler/xla/layout_util.cc b/tensorflow/compiler/xla/layout_util.cc
index 6f8ece1bb10..d2e100bff96 100644
--- a/tensorflow/compiler/xla/layout_util.cc
+++ b/tensorflow/compiler/xla/layout_util.cc
@@ -66,7 +66,7 @@ void SetDefaultLayoutToContainer(T* minor_to_major) {
   for (Tile tile : tiles) {
     for (int64 dim : tile.dimensions()) {
       if (dim < 0 && dim != Tile::kCombineDimension) {
-        LOG(FATAL) << "Tile dimension size needs to be mininum int64 value if "
+        LOG(FATAL) << "Tile dimension size needs to be minimum int64 value if "
                       "it's negative. Value is "
                    << dim;
       }
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_executable.cc b/tensorflow/compiler/xla/service/cpu/cpu_executable.cc
index a950f1f3d0f..4deae02ad2c 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_executable.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_executable.cc
@@ -271,7 +271,7 @@ StatusOr CpuExecutable::CreateResultShapedBuffer(
             slice.allocation()->parameter_number(),
             slice.allocation()->param_shape_index());
         CHECK(output_alias)
-            << "Ouput buffer is coming from parameter "
+            << "Output buffer is coming from parameter "
            << slice.allocation()->parameter_number() << " at index "
            << slice.allocation()->param_shape_index()
            << ", but no alias exists";
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
index 66801d28f16..c4420932e45 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
@@ -734,7 +734,7 @@ StatusOr ElementalIrEmitter::EmitComplexUnaryOp(
   //    is finite and b is either +/-Inf or NaN, then our normal
   //    calculation would end up returing (+/-1, NaN), as opposed to (NaN,
   //    NaN).
-  // 5/6) We always calculate the imagninary value as sin(2b)/denominator.
+  // 5/6) We always calculate the imaginary value as sin(2b)/denominator.
   //      When the denominator is infinity, this assures us that the zero is
   //      the correct sign. However if our imaginary input results in
   //      sin(2b) = NaN, we calculate our imaginary result as NaN.
diff --git a/tensorflow/compiler/xla/service/gpu/custom_call_test.cc b/tensorflow/compiler/xla/service/gpu/custom_call_test.cc
index 53a3ca14400..de321896df0 100644
--- a/tensorflow/compiler/xla/service/gpu/custom_call_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/custom_call_test.cc
@@ -48,7 +48,7 @@ TEST_F(CustomCallTest, IsInvoked) {
 
 TEST_F(CustomCallTest, UnknownTarget) {
   XlaBuilder b(TestName());
-  CustomCall(&b, "UknownTarget", /*operands=*/{}, ShapeUtil::MakeShape(F32, {}),
+  CustomCall(&b, "UnknownTarget", /*operands=*/{}, ShapeUtil::MakeShape(F32, {}),
              /*opaque=*/"");
   ASSERT_FALSE(Execute(&b, {}).ok());
 }
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
index a879e6faf32..943a7f7491c 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
@@ -417,7 +417,7 @@ StatusOr GpuExecutable::ExecuteAsyncOnStream(
             slice.allocation()->parameter_number(),
            slice.allocation()->param_shape_index());
        CHECK(output_alias)
-            << "Ouput buffer is coming from parameter "
+            << "Output buffer is coming from parameter "
            << slice.allocation()->parameter_number() << " at index "
            << slice.allocation()->param_shape_index()
            << ", but no alias exists";
diff --git a/tensorflow/compiler/xla/service/hlo_verifier.cc b/tensorflow/compiler/xla/service/hlo_verifier.cc
index b2beb9dda55..b4d1996373a 100755
--- a/tensorflow/compiler/xla/service/hlo_verifier.cc
+++ b/tensorflow/compiler/xla/service/hlo_verifier.cc
@@ -1599,7 +1599,7 @@ class InstructionVerifier : public DfsHloVisitorWithDefault {
     for (int b = 0; b < conditional->branch_count(); ++b) {
       if (conditional->branch_computation(b)->num_parameters() != 1) {
         return FailedPrecondition(
-            "Branch computation %s of %s must have 1 parameter insted of %d",
+            "Branch computation %s of %s must have 1 parameter instead of %d",
             conditional->branch_computation(b)->name(), conditional->ToString(),
conditional->branch_computation(b)->num_parameters()); } diff --git a/tensorflow/compiler/xla/service/layout_assignment.h b/tensorflow/compiler/xla/service/layout_assignment.h index ef30ec3088b..a04d056c618 100644 --- a/tensorflow/compiler/xla/service/layout_assignment.h +++ b/tensorflow/compiler/xla/service/layout_assignment.h @@ -394,10 +394,10 @@ class LayoutAssignment : public HloModulePass { return Status::OK(); } - // Construct contraints and assign layouts to all instructions in the + // Construct constraints and assign layouts to all instructions in the // computation satisfying the given ComputationLayout, if not nullptr. // Otherwise the ComputationLayout will be calculated by propagating the - // computation instruction contraints. + // computation instruction constraints. // Layouts constraints are added, then propagated until all LogicalBuffers in // the computation are constrained. Status RunOnComputation(ComputationLayout* computation_layout, diff --git a/tensorflow/examples/android/jni/object_tracking/frame_pair.cc b/tensorflow/examples/android/jni/object_tracking/frame_pair.cc index b1a4db631b5..66e422e87b6 100644 --- a/tensorflow/examples/android/jni/object_tracking/frame_pair.cc +++ b/tensorflow/examples/android/jni/object_tracking/frame_pair.cc @@ -56,7 +56,7 @@ void FramePair::AdjustBox(const BoundingBox box, *scale_y = 1.0f; // The assumption is that all deltas that make it to this stage with a - // correspondending optical_flow_found_keypoint_[i] == true are not in + // corresponding optical_flow_found_keypoint_[i] == true are not in // themselves degenerate. // // The degeneracy with scale arose because if the points are too close to the diff --git a/tensorflow/examples/android/jni/object_tracking/tracked_object.cc b/tensorflow/examples/android/jni/object_tracking/tracked_object.cc index d20857528c3..b243b84ef79 100644 --- a/tensorflow/examples/android/jni/object_tracking/tracked_object.cc +++ b/tensorflow/examples/android/jni/object_tracking/tracked_object.cc @@ -50,7 +50,7 @@ TrackedObject::~TrackedObject() {} void TrackedObject::UpdatePosition(const BoundingBox& new_position, const int64_t timestamp, const ImageData& image_data, - const bool authoratative) { + const bool authoritative) { last_known_position_ = new_position; position_last_computed_time_ = timestamp; @@ -88,7 +88,7 @@ void TrackedObject::UpdatePosition(const BoundingBox& new_position, if (object_model_ != NULL) { object_model_->TrackStep(last_known_position_, *image_data.GetImage(), - *image_data.GetIntegralImage(), authoratative); + *image_data.GetIntegralImage(), authoritative); } } else if (tracked_match_score_ < kMatchScoreForImmediateTermination) { if (num_consecutive_frames_below_threshold_ < 1000) { diff --git a/tensorflow/examples/android/jni/object_tracking/tracked_object.h b/tensorflow/examples/android/jni/object_tracking/tracked_object.h index d7f1a7019bb..6a85449c1e1 100644 --- a/tensorflow/examples/android/jni/object_tracking/tracked_object.h +++ b/tensorflow/examples/android/jni/object_tracking/tracked_object.h @@ -37,7 +37,7 @@ class TrackedObject { ~TrackedObject(); void UpdatePosition(const BoundingBox& new_position, const int64_t timestamp, - const ImageData& image_data, const bool authoratative); + const ImageData& image_data, const bool authoritative); // This method is called when the tracked object is detected at a // given position, and allows the associated Model to grow and/or prune diff --git a/tensorflow/examples/speech_commands/recognize_commands.py 
b/tensorflow/examples/speech_commands/recognize_commands.py index c983597dabe..b5c796d6c36 100755 --- a/tensorflow/examples/speech_commands/recognize_commands.py +++ b/tensorflow/examples/speech_commands/recognize_commands.py @@ -26,7 +26,7 @@ class RecognizeResult(object): """Save recognition result temporarily. Attributes: - founded_command: A string indicating the word just founded. Defualt value + founded_command: A string indicating the word just found. Default value is '_silence_' score: An float representing the confidence of founded word. Default value is zero. diff --git a/tensorflow/examples/speech_commands/train.py b/tensorflow/examples/speech_commands/train.py index 343d52e2719..3d7452399f7 100644 --- a/tensorflow/examples/speech_commands/train.py +++ b/tensorflow/examples/speech_commands/train.py @@ -398,7 +398,7 @@ if __name__ == '__main__': '--window_stride_ms', type=float, default=10.0, - help='How far to move in time between spectogram timeslices.',) + help='How far to move in time between spectrogram timeslices.',) parser.add_argument( '--feature_bin_count', type=int, diff --git a/tensorflow/examples/speech_commands/wav_to_features.py b/tensorflow/examples/speech_commands/wav_to_features.py index be3d045f570..2c46066813d 100644 --- a/tensorflow/examples/speech_commands/wav_to_features.py +++ b/tensorflow/examples/speech_commands/wav_to_features.py @@ -53,7 +53,7 @@ def wav_to_features(sample_rate, clip_duration_ms, window_size_ms, sample_rate: Expected sample rate of the wavs. clip_duration_ms: Expected duration in milliseconds of the wavs. window_size_ms: How long each spectrogram timeslice is. - window_stride_ms: How far to move in time between spectogram timeslices. + window_stride_ms: How far to move in time between spectrogram timeslices. feature_bin_count: How many bins to use for the feature fingerprint. quantize: Whether to train the model for eight-bit deployment. preprocess: Spectrogram processing mode; "mfcc", "average" or "micro". @@ -153,7 +153,7 @@ if __name__ == '__main__': '--window_stride_ms', type=float, default=10.0, - help='How far to move in time between spectogram timeslices.',) + help='How far to move in time between spectrogram timeslices.',) parser.add_argument( '--feature_bin_count', type=int, diff --git a/tensorflow/go/op/scope.go b/tensorflow/go/op/scope.go index ac39808d838..83cc6e3bda6 100644 --- a/tensorflow/go/op/scope.go +++ b/tensorflow/go/op/scope.go @@ -25,12 +25,12 @@ import ( // Scope encapsulates common operation properties when building a Graph. // -// A Scope object (and its derivates, e.g., obtained from Scope.SubScope) +// A Scope object (and its derivatives, e.g., obtained from Scope.SubScope) // act as a builder for graphs. They allow common properties (such as // a name prefix) to be specified for multiple operations being added // to the graph. // -// A Scope object and all its derivates (e.g., obtained from Scope.SubScope) +// A Scope object and all its derivatives (e.g., obtained from Scope.SubScope) // are not safe for concurrent use by multiple goroutines. type Scope struct { graph *tf.Graph diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go index f6c5a4f731e..798c005be36 100644 --- a/tensorflow/go/op/wrappers.go +++ b/tensorflow/go/op/wrappers.go @@ -3614,7 +3614,7 @@ func BoostedTreesSparseCalculateBestFeatureSplitSplitType(value string) BoostedT // l1: l1 regularization factor on leaf weights, per instance based. // l2: l2 regularization factor on leaf weights, per instance based. 
// tree_complexity: adjustment to the gain, per leaf based. -// min_node_weight: mininum avg of hessians in a node before required for the node to be considered for splitting. +// min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting. // logits_dimension: The dimension of logit, i.e., number of classes. // // Returns: @@ -3711,7 +3711,7 @@ func BoostedTreesCalculateBestFeatureSplitV2(scope *Scope, node_id_range tf.Outp // l1: l1 regularization factor on leaf weights, per instance based. // l2: l2 regularization factor on leaf weights, per instance based. // tree_complexity: adjustment to the gain, per leaf based. -// min_node_weight: mininum avg of hessians in a node before required for the node to be considered for splitting. +// min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting. // max_splits: the number of nodes that can be split in the whole tree. Used as a dimension of output tensors. // // Returns: @@ -3764,7 +3764,7 @@ func BoostedTreesCalculateBestGainsPerFeature(scope *Scope, node_id_range tf.Out // Checks whether a tree ensemble has been initialized. // // Arguments: -// tree_ensemble_handle: Handle to the tree ensemble resouce. +// tree_ensemble_handle: Handle to the tree ensemble resource. // // Returns output boolean on whether it is initialized or not. func IsBoostedTreesEnsembleInitialized(scope *Scope, tree_ensemble_handle tf.Output) (is_initialized tf.Output) { @@ -5160,7 +5160,7 @@ func CudnnRNNParamsToCanonicalV2NumProj(value int64) CudnnRNNParamsToCanonicalV2 // num_layers: Specifies the number of layers in the RNN model. // num_units: Specifies the size of the hidden state. // input_size: Specifies the size of the input state. -// num_params_weigths: number of weight parameter matrix for all layers. +// num_params_weights: number of weight parameter matrix for all layers. // num_params_biases: number of bias parameter vector for all layers. // weights: the canonical form of weights that can be used for saving // and restoration. They are more likely to be compatible across different @@ -8378,7 +8378,7 @@ func BoostedTreesCalculateBestFeatureSplitSplitType(value string) BoostedTreesCa // l1: l1 regularization factor on leaf weights, per instance based. // l2: l2 regularization factor on leaf weights, per instance based. // tree_complexity: adjustment to the gain, per leaf based. -// min_node_weight: mininum avg of hessians in a node before required for the node to be considered for splitting. +// min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting. // logits_dimension: The dimension of logit, i.e., number of classes. // // Returns: @@ -13774,7 +13774,7 @@ func DebugNumericSummaryV2OutputDtype(value tf.DataType) DebugNumericSummaryV2At // element is a bit which is set to 1 if the input tensor has an // infinity or nan value, or zero otherwise. // -// 3 (CONCISE_HEALTH): Ouput a float32/64 tensor of shape [5]. The 1st +// 3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st // element is the tensor_id, if provided, and -1 otherwise. The // remaining four slots are the total number of elements, -infs, // +infs, and nans in the input tensor respectively. 
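The CONCISE_HEALTH layout documented just above is easy to mis-read, so here is a small sketch of the five slots. It assumes a plain host-side buffer and is only an illustration of the documented layout, not the DebugNumericSummaryV2 kernel itself.

#include <array>
#include <cmath>
#include <cstdio>
#include <vector>

std::array<double, 5> ConciseHealth(const std::vector<double>& t,
                                    double tensor_id = -1.0) {
  // Slots: [tensor_id, #elements, #-infs, #+infs, #nans], per the doc above.
  std::array<double, 5> s{tensor_id, static_cast<double>(t.size()), 0, 0, 0};
  for (double v : t) {
    if (std::isnan(v)) s[4] += 1;
    else if (std::isinf(v)) s[v < 0 ? 2 : 3] += 1;
  }
  return s;
}

int main() {
  auto s = ConciseHealth({1.0, -INFINITY, NAN, 2.0});
  std::printf("[%g %g %g %g %g]\n", s[0], s[1], s[2], s[3], s[4]);
  // prints [-1 4 1 0 1]
  return 0;
}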
@@ -14132,11 +14132,11 @@ func TridiagonalSolve(scope *Scope, diagonals tf.Output, rhs tf.Output, optional // // Arguments: // superdiag: Tensor of shape `[..., 1, M]`, representing superdiagonals of -// tri-diagonal matrices to the left of multiplication. Last element is ingored. +// tri-diagonal matrices to the left of multiplication. Last element is ignored. // maindiag: Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal // matrices to the left of multiplication. // subdiag: Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal -// matrices to the left of multiplication. First element is ingored. +// matrices to the left of multiplication. First element is ignored. // rhs: Tensor of shape `[..., M, N]`, representing MxN matrices to the right of // multiplication. // @@ -17744,7 +17744,7 @@ func CudnnRNNCanonicalToParamsV2NumProj(value int64) CudnnRNNCanonicalToParamsV2 // biases: the canonical form of biases that can be used for saving // and restoration. They are more likely to be compatible across different // generations. -// num_params_weigths: number of weight parameter matrix for all layers. +// num_params_weights: number of weight parameter matrix for all layers. // num_params_biases: number of bias parameter vector for all layers. // rnn_mode: Indicates the type of the RNN model. // input_mode: Indicate whether there is a linear projection between the input and @@ -30931,8 +30931,8 @@ func ResourceApplyFtrlV2UseLocking(value bool) ResourceApplyFtrlV2Attr { // linear: Should be from a Variable(). // grad: The gradient. // lr: Scaling factor. Must be a scalar. -// l1: L1 regulariation. Must be a scalar. -// l2: L2 shrinkage regulariation. Must be a scalar. +// l1: L1 regularization. Must be a scalar. +// l2: L2 shrinkage regularization. Must be a scalar. // // lr_power: Scaling factor. Must be a scalar. // @@ -36271,8 +36271,8 @@ func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr { // linear: Should be from a Variable(). // grad: The gradient. // lr: Scaling factor. Must be a scalar. -// l1: L1 regulariation. Must be a scalar. -// l2: L2 regulariation. Must be a scalar. +// l1: L1 regularization. Must be a scalar. +// l2: L2 regularization. Must be a scalar. // lr_power: Scaling factor. Must be a scalar. // // Returns the created operation. @@ -42921,7 +42921,7 @@ func ResourceSparseApplyFtrlV2UseLocking(value bool) ResourceSparseApplyFtrlV2At // indices: A vector of indices into the first dimension of var and accum. // lr: Scaling factor. Must be a scalar. // l1: L1 regularization. Must be a scalar. -// l2: L2 shrinkage regulariation. Must be a scalar. +// l2: L2 shrinkage regularization. Must be a scalar. // // lr_power: Scaling factor. Must be a scalar. 
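Since the l1/l2 scalars recur in several of these Ftrl hunks, the textbook FTRL-Proximal closed-form update is sketched below purely to show where those scalars enter. The real ResourceApplyFtrl/ResourceApplyFtrlV2 kernels differ in details such as lr_power and the l2 shrinkage term, so treat this as background, not the kernel's code.

#include <cmath>
#include <cstdio>

// Per-coordinate FTRL-Proximal weight, given accumulators z and n.
// l1 induces sparsity; l2 adds standard quadratic regularization.
double FtrlWeight(double z, double n, double alpha, double beta,
                  double l1, double l2) {
  if (std::fabs(z) <= l1) return 0.0;  // small |z| snaps exactly to zero
  double sign = z > 0 ? 1.0 : -1.0;
  return -(z - sign * l1) / ((beta + std::sqrt(n)) / alpha + l2);
}

int main() {
  std::printf("%f\n", FtrlWeight(0.5, 4.0, 0.1, 1.0, 0.1, 0.01));   // nonzero
  std::printf("%f\n", FtrlWeight(0.05, 4.0, 0.1, 1.0, 0.1, 0.01));  // 0.0
  return 0;
}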
// diff --git a/tensorflow/java/src/gen/cc/op_specs.h b/tensorflow/java/src/gen/cc/op_specs.h index 4adcfca96a8..e1af0f16ecf 100644 --- a/tensorflow/java/src/gen/cc/op_specs.h +++ b/tensorflow/java/src/gen/cc/op_specs.h @@ -36,7 +36,7 @@ class EndpointSpec { // package: package of this endpoint (from which also derives its package) // name: name of this endpoint class // javadoc: the endpoint class documentation - // TODO(annarev): hardcode depcreated to false until deprecated is possible + // TODO(annarev): hardcode deprecated to false until deprecated is possible EndpointSpec(const string& package, const string& name, const Javadoc& javadoc) : package_(package), name_(name), javadoc_(javadoc), deprecated_(false) {} diff --git a/tensorflow/java/src/gen/cc/source_writer_test.cc b/tensorflow/java/src/gen/cc/source_writer_test.cc index fb8fc64dffa..490cd2f701a 100644 --- a/tensorflow/java/src/gen/cc/source_writer_test.cc +++ b/tensorflow/java/src/gen/cc/source_writer_test.cc @@ -361,7 +361,7 @@ TEST(WriteType, ParameterizedClassAndSupertypes) { clazz.add_parameter(type_t); Type type_u = Type::Generic("U").add_supertype(Type::Class("Number")); clazz.add_parameter(type_u); - clazz.add_supertype(Type::Interface("Parametrizable").add_parameter(type_u)); + clazz.add_supertype(Type::Interface("Parameterizable").add_parameter(type_u)); clazz.add_supertype(Type::Interface("Runnable")); clazz.add_supertype(Type::Class("SuperTest").add_parameter(type_t)); @@ -370,7 +370,7 @@ TEST(WriteType, ParameterizedClassAndSupertypes) { const char* expected = "package org.tensorflow;\n\n" "public class Test" - " extends SuperTest implements Parametrizable, Runnable {\n}\n"; + " extends SuperTest implements Parameterizable, Runnable {\n}\n"; ASSERT_STREQ(expected, writer.str().data()); } diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD index f08d3e2fde1..4b94e90073b 100644 --- a/tensorflow/python/BUILD +++ b/tensorflow/python/BUILD @@ -5678,7 +5678,7 @@ cc_import( name = "pywrap_tensorflow_import_lib", interface_library = select({ "//tensorflow:windows": ":pywrap_tensorflow_import_lib_file", - "//conditions:default": "not_exsiting_on_unix.lib", # Just a placeholder for Unix platforms + "//conditions:default": "not_existing_on_unix.lib", # Just a placeholder for Unix platforms }), system_provided = 1, ) diff --git a/tensorflow/python/keras/layers/recurrent_v2.py b/tensorflow/python/keras/layers/recurrent_v2.py index 68d0884c54b..136dee51637 100644 --- a/tensorflow/python/keras/layers/recurrent_v2.py +++ b/tensorflow/python/keras/layers/recurrent_v2.py @@ -837,7 +837,7 @@ class LSTMCell(recurrent.LSTMCell): inputs: A 2D tensor, with shape of `[batch, feature]`. states: List of 2 tensors that corresponding to the cell's units. Both of them have shape `[batch, units]`, the first tensor is the memory state - from previous time step, the second tesnor is the carry state from + from previous time step, the second tensor is the carry state from previous time step. For timestep 0, the initial state provided by user will be feed to cell. training: Python boolean indicating whether the layer should behave in diff --git a/tensorflow/python/keras/saving/hdf5_format_test.py b/tensorflow/python/keras/saving/hdf5_format_test.py index 9c58e43d05c..7eddd837c06 100644 --- a/tensorflow/python/keras/saving/hdf5_format_test.py +++ b/tensorflow/python/keras/saving/hdf5_format_test.py @@ -632,7 +632,7 @@ class TestWholeModelSaving(test.TestCase, parameterized.TestCase): # out of proportion. 
Note that it fits into the internal HDF5 # attribute memory limit on its own but because h5py converts # the list of layer names into numpy array, which uses the same - # amout of memory for every item, it increases the memory + # amount of memory for every item, it increases the memory # requirements substantially. x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15))) f = x @@ -1238,7 +1238,7 @@ class TestWeightSavingAndLoadingTFFormat(test.TestCase): self.assertEqual(44., self.evaluate(v)) @test_util.run_in_graph_and_eager_modes - def test_nonexistant_prefix_directory(self): + def test_nonexistent_prefix_directory(self): m = keras.Model() v = m.add_weight(name='v', shape=[]) self.evaluate(v.assign(42.)) diff --git a/tensorflow/python/kernel_tests/scatter_ops_test.py b/tensorflow/python/kernel_tests/scatter_ops_test.py index dc1d6ebd870..8ed3595b904 100644 --- a/tensorflow/python/kernel_tests/scatter_ops_test.py +++ b/tensorflow/python/kernel_tests/scatter_ops_test.py @@ -329,7 +329,7 @@ class ScatterTest(test.TestCase): indices = np.array([2, 0, 5]) self.evaluate(op(ref, indices, updates)) - # Indicies out of range should not fail. + # Indices out of range should not fail. indices = np.array([-1, 0, 5]) self.evaluate(op(ref, indices, updates)) indices = np.array([2, 0, 6]) diff --git a/tensorflow/python/module/module_test.py b/tensorflow/python/module/module_test.py index 267da80c0bd..7fa4fc14d7f 100644 --- a/tensorflow/python/module/module_test.py +++ b/tensorflow/python/module/module_test.py @@ -151,7 +151,7 @@ class TestModuleNaming(test_util.TensorFlowTestCase): with self.assertRaises(ErrorModuleError): # If super ctor is not called then the name scope isn't opened. We need to # ensure that this doesn't trigger an exception (e.g. the metaclass trying - # to __exit__ a non-existant name scope). + # to __exit__ a non-existent name scope). ErrorModule(call_super=False) self.assertEqual("", get_name_scope()) diff --git a/tensorflow/python/ops/metrics_impl.py b/tensorflow/python/ops/metrics_impl.py index a4437d65018..d2b9274f42f 100644 --- a/tensorflow/python/ops/metrics_impl.py +++ b/tensorflow/python/ops/metrics_impl.py @@ -291,7 +291,7 @@ def _aggregate_across_replicas(metrics_collections, metric_value_fn, *args): # inside a while_loop (and perhaps a TPU rewrite context). But we don't # want the value op to be evaluated every step or on the TPU. So we # create it outside so that it can be evaluated at the end on the host, - # once the update ops have been evaluted. + # once the update ops have been evaluated. 
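The hdf5_format_test comment above (one very long layer name inflating every entry) comes down to fixed-width storage; the sketch below shows the arithmetic with made-up sizes, mimicking in C++ what h5py's numpy conversion does to the list of names.

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> names(100, "dense");
  names.push_back("input_" + std::string(1 << 15, 'x'));  // one 32k-char name
  size_t max_len = 0, total = 0;
  for (const std::string& s : names) {
    max_len = std::max(max_len, s.size());
    total += s.size();
  }
  // Fixed-width (numpy-style) storage pays max_len for every item:
  std::printf("variable-width: %zu bytes, fixed-width: %zu bytes\n",
              total, names.size() * max_len);
  return 0;
}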
# pylint: disable=protected-access if distribution.extended._outer_control_flow_context is None: diff --git a/tensorflow/python/profiler/internal/model_analyzer_testlib.py b/tensorflow/python/profiler/internal/model_analyzer_testlib.py index edce43b9d6c..459822cf5ce 100644 --- a/tensorflow/python/profiler/internal/model_analyzer_testlib.py +++ b/tensorflow/python/profiler/internal/model_analyzer_testlib.py @@ -72,7 +72,7 @@ def BuildFullModel(): return sgd_op.minimize(loss) -def BuildSplitableModel(): +def BuildSplittableModel(): """Build a small model that can be run partially in each step.""" image = array_ops.zeros([2, 6, 6, 3]) diff --git a/tensorflow/python/profiler/profiler_test.py b/tensorflow/python/profiler/profiler_test.py index e4f7361e5d7..3c4514bbc82 100644 --- a/tensorflow/python/profiler/profiler_test.py +++ b/tensorflow/python/profiler/profiler_test.py @@ -111,7 +111,7 @@ class ProfilerTest(test.TestCase): opts = builder.time_and_memory(min_bytes=0) with session.Session() as sess: - r1, r2, r3 = lib.BuildSplitableModel() + r1, r2, r3 = lib.BuildSplittableModel() sess.run(variables.global_variables_initializer()) profiler = model_analyzer.Profiler(sess.graph) diff --git a/tensorflow/python/saved_model/utils_test.py b/tensorflow/python/saved_model/utils_test.py index 2b9e8fb2e03..fa623c4239e 100644 --- a/tensorflow/python/saved_model/utils_test.py +++ b/tensorflow/python/saved_model/utils_test.py @@ -163,7 +163,7 @@ class UtilsTest(test.TestCase): def testGetTensorFromInfoRaisesErrors(self): expected = array_ops.placeholder(dtypes.float32, 1, name="x") tensor_info = utils.build_tensor_info(expected) - tensor_info.name = "blah:0" # Nonexistant name. + tensor_info.name = "blah:0" # Nonexistent name. with self.assertRaises(KeyError): utils.get_tensor_from_tensor_info(tensor_info) tensor_info.ClearField("name") # Malformed (missing encoding). diff --git a/tensorflow/python/training/momentum_test.py b/tensorflow/python/training/momentum_test.py index ba155fa6c64..8d27e957fc8 100644 --- a/tensorflow/python/training/momentum_test.py +++ b/tensorflow/python/training/momentum_test.py @@ -260,7 +260,7 @@ class MomentumOptimizerTest(test.TestCase): self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0)) @test_util.run_in_graph_and_eager_modes(reset_test=True) - def testMinimizeWith2DIndiciesForEmbeddingLookup(self): + def testMinimizeWith2DIndicesForEmbeddingLookup(self): # This test invokes the ResourceSparseApplyMomentum operation, which # did not have a registered GPU kernel as of April 2018. With graph # execution, the placement algorithm notices this and automatically diff --git a/tensorflow/stream_executor/blas.h b/tensorflow/stream_executor/blas.h index faf4a13b17f..d361343c381 100644 --- a/tensorflow/stream_executor/blas.h +++ b/tensorflow/stream_executor/blas.h @@ -92,7 +92,7 @@ string SideString(Side s); // Type with which intermediate computations of a blas routine are performed. // // Some blas calls can perform computations with a type that's different than -// the type of their inputs/outputs. This lets you e.g. multiply two matricies +// the type of their inputs/outputs. This lets you e.g. multiply two matrices // of int8s using float32s to store the matmul's intermediate values. 
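The ComputationType comment above (int8 inputs multiplied while float32 holds the intermediates) can be made concrete with a tiny GEMM. This is only an illustration of the idea, not the StreamExecutor BLAS API; all values here fit comfortably in int8.

#include <cstdint>
#include <cstdio>

// C = A * B for 2x2 int8 matrices, accumulating in float.
void GemmInt8WithFloatAccum(const int8_t A[4], const int8_t B[4], int8_t C[4]) {
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j) {
      float acc = 0.0f;  // ComputationType::kF32-style intermediate
      for (int k = 0; k < 2; ++k)
        acc += static_cast<float>(A[i * 2 + k]) * B[k * 2 + j];
      C[i * 2 + j] = static_cast<int8_t>(acc);  // narrow back to int8
    }
}

int main() {
  const int8_t A[4] = {1, 2, 3, 4}, B[4] = {5, 6, 7, 8};
  int8_t C[4];
  GemmInt8WithFloatAccum(A, B, C);
  std::printf("%d %d %d %d\n", C[0], C[1], C[2], C[3]);  // 19 22 43 50
  return 0;
}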
enum class ComputationType { kF16, // 16-bit floating-point diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc index 70cc11a3e03..03947dafb07 100755 --- a/tensorflow/stream_executor/cuda/cuda_dnn.cc +++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc @@ -1195,7 +1195,7 @@ class CudnnRnnDescriptor : public dnn::RnnDescriptor { namespace { -// Check if the LSTM projection is used. If yes, an additional weigth matrix +// Check if the LSTM projection is used. If yes, an additional weight matrix // (projection matrix) will be fetched to the 'weights'. Otherwise, nothing will // be done. port::Status CheckAndFetchProjectionWeights( diff --git a/tensorflow/stream_executor/cuda/cudnn_6_0.inc b/tensorflow/stream_executor/cuda/cudnn_6_0.inc index e9c51d60570..6ac7a695d9f 100644 --- a/tensorflow/stream_executor/cuda/cudnn_6_0.inc +++ b/tensorflow/stream_executor/cuda/cudnn_6_0.inc @@ -516,11 +516,11 @@ cudnnStatus_t CUDNNWINAPI cudnnGetConvolutionNdForwardOutputDim( const cudnnTensorDescriptor_t inputTensorDesc, const cudnnFilterDescriptor_t filterDesc, int nbDims, - int tensorOuputDimA[] ) { + int tensorOutputDimA[] ) { using FuncPtr = cudnnStatus_t (CUDNNWINAPI *)(const cudnnConvolutionDescriptor_t, const cudnnTensorDescriptor_t, const cudnnFilterDescriptor_t, int, int []); static auto func_ptr = LoadSymbol("cudnnGetConvolutionNdForwardOutputDim"); if (!func_ptr) return GetSymbolNotFoundError(); - return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOuputDimA); + return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOutputDimA); } cudnnStatus_t CUDNNWINAPI cudnnDestroyConvolutionDescriptor( diff --git a/tensorflow/stream_executor/cuda/cudnn_7_0.inc b/tensorflow/stream_executor/cuda/cudnn_7_0.inc index ac6b0dd823e..d2ea31e366b 100644 --- a/tensorflow/stream_executor/cuda/cudnn_7_0.inc +++ b/tensorflow/stream_executor/cuda/cudnn_7_0.inc @@ -559,11 +559,11 @@ cudnnStatus_t CUDNNWINAPI cudnnGetConvolutionNdForwardOutputDim( const cudnnTensorDescriptor_t inputTensorDesc, const cudnnFilterDescriptor_t filterDesc, int nbDims, - int tensorOuputDimA[] ) { + int tensorOutputDimA[] ) { using FuncPtr = cudnnStatus_t (CUDNNWINAPI *)(const cudnnConvolutionDescriptor_t, const cudnnTensorDescriptor_t, const cudnnFilterDescriptor_t, int, int []); static auto func_ptr = LoadSymbol("cudnnGetConvolutionNdForwardOutputDim"); if (!func_ptr) return GetSymbolNotFoundError(); - return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOuputDimA); + return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOutputDimA); } cudnnStatus_t CUDNNWINAPI cudnnDestroyConvolutionDescriptor( diff --git a/tensorflow/stream_executor/cuda/cudnn_7_1.inc b/tensorflow/stream_executor/cuda/cudnn_7_1.inc index 21abd7fdb16..9f4b28f3fe3 100644 --- a/tensorflow/stream_executor/cuda/cudnn_7_1.inc +++ b/tensorflow/stream_executor/cuda/cudnn_7_1.inc @@ -559,11 +559,11 @@ cudnnStatus_t CUDNNWINAPI cudnnGetConvolutionNdForwardOutputDim( const cudnnTensorDescriptor_t inputTensorDesc, const cudnnFilterDescriptor_t filterDesc, int nbDims, - int tensorOuputDimA[] ) { + int tensorOutputDimA[] ) { using FuncPtr = cudnnStatus_t (CUDNNWINAPI *)(const cudnnConvolutionDescriptor_t, const cudnnTensorDescriptor_t, const cudnnFilterDescriptor_t, int, int []); static auto func_ptr = LoadSymbol("cudnnGetConvolutionNdForwardOutputDim"); if (!func_ptr) return GetSymbolNotFoundError(); - return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOuputDimA); 
+ return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOutputDimA); } cudnnStatus_t CUDNNWINAPI cudnnDestroyConvolutionDescriptor( diff --git a/tensorflow/stream_executor/cuda/cudnn_7_3.inc b/tensorflow/stream_executor/cuda/cudnn_7_3.inc index 1f8e997ab9d..0ee8e1492d5 100644 --- a/tensorflow/stream_executor/cuda/cudnn_7_3.inc +++ b/tensorflow/stream_executor/cuda/cudnn_7_3.inc @@ -557,11 +557,11 @@ cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDes const cudnnTensorDescriptor_t inputTensorDesc, const cudnnFilterDescriptor_t filterDesc, int nbDims, - int tensorOuputDimA[]) { + int tensorOutputDimA[]) { using FuncPtr = cudnnStatus_t (CUDNNWINAPI *)(const cudnnConvolutionDescriptor_t, const cudnnTensorDescriptor_t, const cudnnFilterDescriptor_t, int, int []); static auto func_ptr = LoadSymbol("cudnnGetConvolutionNdForwardOutputDim"); if (!func_ptr) return GetSymbolNotFoundError(); - return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOuputDimA); + return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOutputDimA); } cudnnStatus_t CUDNNWINAPI diff --git a/tensorflow/stream_executor/cuda/cudnn_7_4.inc b/tensorflow/stream_executor/cuda/cudnn_7_4.inc index cd35c1fbb74..bd9f49f9780 100644 --- a/tensorflow/stream_executor/cuda/cudnn_7_4.inc +++ b/tensorflow/stream_executor/cuda/cudnn_7_4.inc @@ -557,11 +557,11 @@ cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDes const cudnnTensorDescriptor_t inputTensorDesc, const cudnnFilterDescriptor_t filterDesc, int nbDims, - int tensorOuputDimA[]) { + int tensorOutputDimA[]) { using FuncPtr = cudnnStatus_t (CUDNNWINAPI *)(const cudnnConvolutionDescriptor_t, const cudnnTensorDescriptor_t, const cudnnFilterDescriptor_t, int, int []); static auto func_ptr = LoadSymbol("cudnnGetConvolutionNdForwardOutputDim"); if (!func_ptr) return GetSymbolNotFoundError(); - return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOuputDimA); + return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOutputDimA); } cudnnStatus_t CUDNNWINAPI diff --git a/tensorflow/stream_executor/cuda/cudnn_7_6.inc b/tensorflow/stream_executor/cuda/cudnn_7_6.inc index 030f3ed20d0..7a5f1c9751d 100644 --- a/tensorflow/stream_executor/cuda/cudnn_7_6.inc +++ b/tensorflow/stream_executor/cuda/cudnn_7_6.inc @@ -702,11 +702,11 @@ cudnnGetConvolutionNdForwardOutputDim(const cudnnConvolutionDescriptor_t convDes const cudnnTensorDescriptor_t inputTensorDesc, const cudnnFilterDescriptor_t filterDesc, int nbDims, - int tensorOuputDimA[]) { + int tensorOutputDimA[]) { using FuncPtr = cudnnStatus_t (CUDNNWINAPI *)(const cudnnConvolutionDescriptor_t, const cudnnTensorDescriptor_t, const cudnnFilterDescriptor_t, int, int []); static auto func_ptr = LoadSymbol("cudnnGetConvolutionNdForwardOutputDim"); if (!func_ptr) return GetSymbolNotFoundError(); - return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOuputDimA); + return func_ptr(convDesc, inputTensorDesc, filterDesc, nbDims, tensorOutputDimA); } cudnnStatus_t CUDNNWINAPI diff --git a/tensorflow/stream_executor/cuda/cusparse_9_0.inc b/tensorflow/stream_executor/cuda/cusparse_9_0.inc index 2488823714a..bb82f3ebb46 100644 --- a/tensorflow/stream_executor/cuda/cusparse_9_0.inc +++ b/tensorflow/stream_executor/cuda/cusparse_9_0.inc @@ -4887,7 +4887,7 @@ cusparseStatus_t CUSPARSEAPI cusparseDcsr2csr_compress( int m, // number of rows int n, const cusparseMatDescr_t descra, const double *csrValA, // csr values array-the 
elements which are below a - // certain tolerance will be remvoed + // certain tolerance will be removed const int *csrColIndA, const int *csrRowPtrA, // corresponding input noncompressed row pointer int nnzA, const int *nnzPerRow, double *csrValC, int *csrColIndC, @@ -4907,7 +4907,7 @@ cusparseStatus_t CUSPARSEAPI cusparseCcsr2csr_compress( int m, // number of rows int n, const cusparseMatDescr_t descra, const cuComplex *csrValA, // csr values array-the elements which are below - // a certain tolerance will be remvoed + // a certain tolerance will be removed const int *csrColIndA, const int *csrRowPtrA, // corresponding input noncompressed row pointer int nnzA, const int *nnzPerRow, cuComplex *csrValC, int *csrColIndC, @@ -4927,7 +4927,7 @@ cusparseStatus_t CUSPARSEAPI cusparseZcsr2csr_compress( int m, // number of rows int n, const cusparseMatDescr_t descra, const cuDoubleComplex *csrValA, // csr values array-the elements which are - // below a certain tolerance will be remvoed + // below a certain tolerance will be removed const int *csrColIndA, const int *csrRowPtrA, // corresponding input noncompressed row pointer int nnzA, const int *nnzPerRow, cuDoubleComplex *csrValC, int *csrColIndC, diff --git a/tensorflow/stream_executor/device_description.cc b/tensorflow/stream_executor/device_description.cc index 9038c04947a..5bdfb7ef1d0 100644 --- a/tensorflow/stream_executor/device_description.cc +++ b/tensorflow/stream_executor/device_description.cc @@ -137,7 +137,7 @@ bool ThreadDimOk(const DeviceDescription &device_description, thread_dim.z <= limit.z; if (!ok) { VLOG(2) << "thread dim " << thread_dim.ToString() - << " exceeds limit contraints of " << limit.ToString(); + << " exceeds limit constraints of " << limit.ToString(); } return ok; } diff --git a/tensorflow/stream_executor/device_memory.h b/tensorflow/stream_executor/device_memory.h index c93ca3fefd7..251c70224f7 100644 --- a/tensorflow/stream_executor/device_memory.h +++ b/tensorflow/stream_executor/device_memory.h @@ -109,7 +109,7 @@ class DeviceMemoryBase { private: void *opaque_; // Platform-dependent value representing allocated memory. uint64 size_; // Size in bytes of this allocation. - uint64 payload_ = 0; // Payload data associtated with this allocation. + uint64 payload_ = 0; // Payload data associated with this allocation. }; // Typed wrapper around "void *"-like DeviceMemoryBase. diff --git a/tensorflow/stream_executor/dnn.h b/tensorflow/stream_executor/dnn.h index b791e94d903..3333cea45b1 100644 --- a/tensorflow/stream_executor/dnn.h +++ b/tensorflow/stream_executor/dnn.h @@ -2148,7 +2148,7 @@ class DnnSupport { // max_seq_length: the max length of the sequences. // batch_size: the size of a minibatch. // data_size: the size of the state. - // seq_lenghs: the lengths of sequences in a batch. + // seq_lengths: the lengths of sequences in a batch. // data_type: an enum to specify the type for the underlying data. virtual port::StatusOr> createRnnSequenceTensorDescriptor(int max_seq_length, int batch_size, diff --git a/tensorflow/stream_executor/gpu/gpu_executor.h b/tensorflow/stream_executor/gpu/gpu_executor.h index f373a574a2a..a24b402c743 100644 --- a/tensorflow/stream_executor/gpu/gpu_executor.h +++ b/tensorflow/stream_executor/gpu/gpu_executor.h @@ -40,7 +40,7 @@ namespace stream_executor { namespace gpu { // CUDA-platform implementation of the platform-agnostic -// StreamExecutorInferface. +// StreamExecutorInterface. 
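The ThreadDimOk check quoted earlier in this group of hunks is a pure component-wise comparison against the device limits; a stand-alone sketch follows, with ThreadDim3 as an invented stand-in for the real StreamExecutor type.

#include <cstdio>

struct ThreadDim3 { long x, y, z; };

// A launch dimension is valid only if every component is within the limit.
bool ThreadDimOkSketch(const ThreadDim3& dim, const ThreadDim3& limit) {
  return dim.x <= limit.x && dim.y <= limit.y && dim.z <= limit.z;
}

int main() {
  ThreadDim3 limit{1024, 1024, 64};
  std::printf("%d\n", ThreadDimOkSketch({256, 1, 1}, limit));   // 1: ok
  std::printf("%d\n", ThreadDimOkSketch({2048, 1, 1}, limit));  // 0: exceeds x
  return 0;
}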
class GpuExecutor : public internal::StreamExecutorInterface { public: // sub_platform indicates the subplatform used in this executor; it must @@ -328,10 +328,10 @@ class GpuExecutor : public internal::StreamExecutorInterface { // for use in getting device metadata. Immutable post-initialization. int device_ordinal_; - // The major verion of the compute capability for device_. + // The major version of the compute capability for device_. int cc_major_; - // The minor verion of the compute capability for device_. + // The minor version of the compute capability for device_. int cc_minor_; // GPU ISA version for device_. diff --git a/tensorflow/stream_executor/gpu/gpu_timer.h b/tensorflow/stream_executor/gpu/gpu_timer.h index 886f0c2d577..609d7f50e76 100644 --- a/tensorflow/stream_executor/gpu/gpu_timer.h +++ b/tensorflow/stream_executor/gpu/gpu_timer.h @@ -30,7 +30,7 @@ class GpuExecutor; class GpuStream; // Wraps a pair of GpuEventHandles in order to satisfy the platform-independent -// TimerInferface -- both a start and a stop event are present which may be +// TimerInterface -- both a start and a stop event are present which may be // recorded in a stream. class GpuTimer : public internal::TimerInterface { public: diff --git a/tensorflow/stream_executor/multi_platform_manager.h b/tensorflow/stream_executor/multi_platform_manager.h index 1f253c057cc..6e6617a6da9 100644 --- a/tensorflow/stream_executor/multi_platform_manager.h +++ b/tensorflow/stream_executor/multi_platform_manager.h @@ -116,7 +116,7 @@ class MultiPlatformManager { static port::StatusOr InitializePlatformWithId( const Platform::Id& id, const std::map& options); - // Retrives the platforms satisfying the given filter, i.e. returns true. + // Retrieves the platforms satisfying the given filter, i.e. returns true. // Returned Platforms are always initialized. static port::StatusOr> PlatformsWithFilter( const std::function& filter); @@ -134,7 +134,7 @@ class MultiPlatformManager { // during allocation of such Platforms, to avoid spurious reporting at program // exit. - // Interface for a listener that gets notfied at certain events. + // Interface for a listener that gets notified at certain events. 
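The long run of rocm_blas.cc hunks that follows repeats one shape: an unsupported operation/datatype pair logs an error and returns false. A condensed stand-in of that pattern is sketched below; UnsupportedBlasOp is invented for illustration, whereas rocm_blas.cc writes each message inline via LOG(ERROR).

#include <cstdio>

bool UnsupportedBlasOp(const char* op, const char* datatype) {
  std::fprintf(stderr,
               "rocBLAS does not currently support the %s operation "
               "for the \"%s\" datatype\n",
               op, datatype);
  return false;  // callers interpret false as "operation not performed"
}

int main() {
  UnsupportedBlasOp("ASUM", "complex");
  return 0;
}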
class Listener { public: virtual ~Listener() = default; diff --git a/tensorflow/stream_executor/rocm/rocm_blas.cc b/tensorflow/stream_executor/rocm/rocm_blas.cc index a5a588bbbde..1c695b7a24c 100644 --- a/tensorflow/stream_executor/rocm/rocm_blas.cc +++ b/tensorflow/stream_executor/rocm/rocm_blas.cc @@ -436,7 +436,7 @@ bool ROCMBlas::DoBlasAsum(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory *result) { LOG(ERROR) << "rocBLAS does not currently support the ASUM operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -444,7 +444,7 @@ bool ROCMBlas::DoBlasAsum(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory *result) { LOG(ERROR) << "rocBLAS does not currently support the ASUM operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -469,7 +469,7 @@ bool ROCMBlas::DoBlasAxpy(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the AXPY operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -478,7 +478,7 @@ bool ROCMBlas::DoBlasAxpy(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the AXPY operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -502,7 +502,7 @@ bool ROCMBlas::DoBlasCopy(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the COPY operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -510,7 +510,7 @@ bool ROCMBlas::DoBlasCopy(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the COPY operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -537,7 +537,7 @@ bool ROCMBlas::DoBlasDotc(Stream *stream, uint64 elem_count, const DeviceMemory> &y, int incy, DeviceMemory> *result) { LOG(ERROR) << "rocBLAS does not currently support the DOT operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -546,7 +546,7 @@ bool ROCMBlas::DoBlasDotc(Stream *stream, uint64 elem_count, const DeviceMemory> &y, int incy, DeviceMemory> *result) { LOG(ERROR) << "rocBLAS does not currently support the DOT operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -555,7 +555,7 @@ bool ROCMBlas::DoBlasDotu(Stream *stream, uint64 elem_count, const DeviceMemory> &y, int incy, DeviceMemory> *result) { LOG(ERROR) << "rocBLAS does not currently support the DOT operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -564,7 +564,7 @@ bool ROCMBlas::DoBlasDotu(Stream *stream, uint64 elem_count, const DeviceMemory> &y, int incy, DeviceMemory> *result) { LOG(ERROR) << "rocBLAS does not currently support the DOT operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -588,7 +588,7 @@ bool ROCMBlas::DoBlasNrm2(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory *result) { LOG(ERROR) << "rocBLAS does not currently support the NRM2 operation " - << "for the \"complex\" 
dataype"; + << "for the \"complex\" datatype"; return false; } @@ -596,7 +596,7 @@ bool ROCMBlas::DoBlasNrm2(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory *result) { LOG(ERROR) << "rocBLAS does not currently support the NRM2 operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -604,7 +604,7 @@ bool ROCMBlas::DoBlasRot(Stream *stream, uint64 elem_count, DeviceMemory *x, int incx, DeviceMemory *y, int incy, float c, float s) { LOG(ERROR) << "rocBLAS does not currently support the ROT operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -613,7 +613,7 @@ bool ROCMBlas::DoBlasRot(Stream *stream, uint64 elem_count, DeviceMemory *y, int incy, double c, double s) { LOG(ERROR) << "rocBLAS does not currently support the ROT operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -622,7 +622,7 @@ bool ROCMBlas::DoBlasRot(Stream *stream, uint64 elem_count, DeviceMemory> *y, int incy, float c, float s) { LOG(ERROR) << "rocBLAS does not currently support the ROT operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -631,7 +631,7 @@ bool ROCMBlas::DoBlasRot(Stream *stream, uint64 elem_count, DeviceMemory> *y, int incy, double c, double s) { LOG(ERROR) << "rocBLAS does not currently support the ROT operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -639,7 +639,7 @@ bool ROCMBlas::DoBlasRotg(Stream *stream, DeviceMemory *a, DeviceMemory *b, DeviceMemory *c, DeviceMemory *s) { LOG(ERROR) << "rocBLAS does not currently support the ROTG operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -647,7 +647,7 @@ bool ROCMBlas::DoBlasRotg(Stream *stream, DeviceMemory *a, DeviceMemory *b, DeviceMemory *c, DeviceMemory *s) { LOG(ERROR) << "rocBLAS does not currently support the ROTG operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -656,7 +656,7 @@ bool ROCMBlas::DoBlasRotg(Stream *stream, DeviceMemory> *a, DeviceMemory *c, DeviceMemory> *s) { LOG(ERROR) << "rocBLAS does not currently support the ROTG operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -665,7 +665,7 @@ bool ROCMBlas::DoBlasRotg(Stream *stream, DeviceMemory> *a, DeviceMemory *c, DeviceMemory> *s) { LOG(ERROR) << "rocBLAS does not currently support the ROTG operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -674,7 +674,7 @@ bool ROCMBlas::DoBlasRotm(Stream *stream, uint64 elem_count, DeviceMemory *y, int incy, const DeviceMemory ¶m) { LOG(ERROR) << "rocBLAS does not currently support the ROTM operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -683,7 +683,7 @@ bool ROCMBlas::DoBlasRotm(Stream *stream, uint64 elem_count, DeviceMemory *y, int incy, const DeviceMemory ¶m) { LOG(ERROR) << "rocBLAS does not currently support the ROTM operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -692,7 +692,7 @@ bool ROCMBlas::DoBlasRotmg(Stream *stream, DeviceMemory *d1, const DeviceMemory &y1, DeviceMemory *param) { LOG(ERROR) << "rocBLAS does not currently support the ROTMG operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -701,7 +701,7 @@ bool 
ROCMBlas::DoBlasRotmg(Stream *stream, DeviceMemory *d1, const DeviceMemory &y1, DeviceMemory *param) { LOG(ERROR) << "rocBLAS does not currently support the ROTMG operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -722,14 +722,14 @@ bool ROCMBlas::DoBlasScal(Stream *stream, uint64 elem_count, double alpha, bool ROCMBlas::DoBlasScal(Stream *stream, uint64 elem_count, float alpha, DeviceMemory> *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the SCAL operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } bool ROCMBlas::DoBlasScal(Stream *stream, uint64 elem_count, double alpha, DeviceMemory> *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the SCAL operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -737,7 +737,7 @@ bool ROCMBlas::DoBlasScal(Stream *stream, uint64 elem_count, std::complex alpha, DeviceMemory> *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the SCAL operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -745,7 +745,7 @@ bool ROCMBlas::DoBlasScal(Stream *stream, uint64 elem_count, std::complex alpha, DeviceMemory> *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the SCAL operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -769,7 +769,7 @@ bool ROCMBlas::DoBlasSwap(Stream *stream, uint64 elem_count, DeviceMemory> *x, int incx, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the SWAP operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -777,7 +777,7 @@ bool ROCMBlas::DoBlasSwap(Stream *stream, uint64 elem_count, DeviceMemory> *x, int incx, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the SWAP operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -801,7 +801,7 @@ bool ROCMBlas::DoBlasIamax(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory *result) { LOG(ERROR) << "rocBLAS does not currently support the AMAX operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -809,7 +809,7 @@ bool ROCMBlas::DoBlasIamax(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory *result) { LOG(ERROR) << "rocBLAS does not currently support the AMAX operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -833,7 +833,7 @@ bool ROCMBlas::DoBlasIamin(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory *result) { LOG(ERROR) << "rocBLAS does not currently support the AMIN operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -841,7 +841,7 @@ bool ROCMBlas::DoBlasIamin(Stream *stream, uint64 elem_count, const DeviceMemory> &x, int incx, DeviceMemory *result) { LOG(ERROR) << "rocBLAS does not currently support the AMIN operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -851,7 +851,7 @@ bool ROCMBlas::DoBlasGbmv(Stream *stream, blas::Transpose trans, uint64 m, const DeviceMemory &x, int incx, float beta, DeviceMemory *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the GBMV operation " - << "for the \"float\" dataype"; + << "for 
the \"float\" datatype"; return false; } @@ -861,7 +861,7 @@ bool ROCMBlas::DoBlasGbmv(Stream *stream, blas::Transpose trans, uint64 m, const DeviceMemory &x, int incx, double beta, DeviceMemory *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the GBMV operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -873,7 +873,7 @@ bool ROCMBlas::DoBlasGbmv(Stream *stream, blas::Transpose trans, uint64 m, std::complex beta, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the GBMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -885,7 +885,7 @@ bool ROCMBlas::DoBlasGbmv(Stream *stream, blas::Transpose trans, uint64 m, std::complex beta, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the GBMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -916,7 +916,7 @@ bool ROCMBlas::DoBlasGemv(Stream *stream, blas::Transpose trans, uint64 m, std::complex beta, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the GEMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -927,7 +927,7 @@ bool ROCMBlas::DoBlasGemv(Stream *stream, blas::Transpose trans, uint64 m, std::complex beta, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the GEMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -955,7 +955,7 @@ bool ROCMBlas::DoBlasGerc(Stream *stream, uint64 m, uint64 n, const DeviceMemory> &y, int incy, DeviceMemory> *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the GER operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -965,7 +965,7 @@ bool ROCMBlas::DoBlasGerc(Stream *stream, uint64 m, uint64 n, const DeviceMemory> &y, int incy, DeviceMemory> *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the GER operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -975,7 +975,7 @@ bool ROCMBlas::DoBlasGeru(Stream *stream, uint64 m, uint64 n, const DeviceMemory> &y, int incy, DeviceMemory> *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the GERU operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -985,7 +985,7 @@ bool ROCMBlas::DoBlasGeru(Stream *stream, uint64 m, uint64 n, const DeviceMemory> &y, int incy, DeviceMemory> *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the GERU operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -996,7 +996,7 @@ bool ROCMBlas::DoBlasHbmv(Stream *stream, blas::UpperLower uplo, uint64 n, std::complex beta, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the HBMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1007,7 +1007,7 @@ bool ROCMBlas::DoBlasHbmv(Stream *stream, blas::UpperLower uplo, uint64 n, std::complex beta, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the HBMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1018,7 +1018,7 @@ bool ROCMBlas::DoBlasHemv(Stream *stream, blas::UpperLower uplo, uint64 n, std::complex beta, DeviceMemory> *y, int 
incy) { LOG(ERROR) << "rocBLAS does not currently support the HEMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1029,7 +1029,7 @@ bool ROCMBlas::DoBlasHemv(Stream *stream, blas::UpperLower uplo, uint64 n, std::complex beta, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the HEMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1038,7 +1038,7 @@ bool ROCMBlas::DoBlasHer(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory> &x, int incx, DeviceMemory> *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the HER operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1047,7 +1047,7 @@ bool ROCMBlas::DoBlasHer(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory> &x, int incx, DeviceMemory> *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the HER operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1057,7 +1057,7 @@ bool ROCMBlas::DoBlasHer2(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory> &y, int incy, DeviceMemory> *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the HER2 operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1067,7 +1067,7 @@ bool ROCMBlas::DoBlasHer2(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory> &y, int incy, DeviceMemory> *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the HER2 operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1078,7 +1078,7 @@ bool ROCMBlas::DoBlasHpmv(Stream *stream, blas::UpperLower uplo, uint64 n, std::complex beta, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the HPMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1089,7 +1089,7 @@ bool ROCMBlas::DoBlasHpmv(Stream *stream, blas::UpperLower uplo, uint64 n, std::complex beta, DeviceMemory> *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the HPMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1098,7 +1098,7 @@ bool ROCMBlas::DoBlasHpr(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory> &x, int incx, DeviceMemory> *ap) { LOG(ERROR) << "rocBLAS does not currently support the HPR operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1107,7 +1107,7 @@ bool ROCMBlas::DoBlasHpr(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory> &x, int incx, DeviceMemory> *ap) { LOG(ERROR) << "rocBLAS does not currently support the HPR operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1117,7 +1117,7 @@ bool ROCMBlas::DoBlasHpr2(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory> &y, int incy, DeviceMemory> *ap) { LOG(ERROR) << "rocBLAS does not currently support the HPR2 operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1127,7 +1127,7 @@ bool ROCMBlas::DoBlasHpr2(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory> &y, int incy, DeviceMemory> *ap) { LOG(ERROR) << "rocBLAS does not currently support the HPR2 operation " - << "for the \"complex\" dataype"; + 
<< "for the \"complex\" datatype"; return false; } @@ -1136,7 +1136,7 @@ bool ROCMBlas::DoBlasSbmv(Stream *stream, blas::UpperLower uplo, uint64 n, int lda, const DeviceMemory &x, int incx, float beta, DeviceMemory *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the SBMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1146,7 +1146,7 @@ bool ROCMBlas::DoBlasSbmv(Stream *stream, blas::UpperLower uplo, uint64 n, int lda, const DeviceMemory &x, int incx, double beta, DeviceMemory *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the SBMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1155,7 +1155,7 @@ bool ROCMBlas::DoBlasSpmv(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory &x, int incx, float beta, DeviceMemory *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the SPMV operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -1164,7 +1164,7 @@ bool ROCMBlas::DoBlasSpmv(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory &x, int incx, double beta, DeviceMemory *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the SPMV operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -1172,7 +1172,7 @@ bool ROCMBlas::DoBlasSpr(Stream *stream, blas::UpperLower uplo, uint64 n, float alpha, const DeviceMemory &x, int incx, DeviceMemory *ap) { LOG(ERROR) << "rocBLAS does not currently support the SPR operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -1180,7 +1180,7 @@ bool ROCMBlas::DoBlasSpr(Stream *stream, blas::UpperLower uplo, uint64 n, double alpha, const DeviceMemory &x, int incx, DeviceMemory *ap) { LOG(ERROR) << "rocBLAS does not currently support the SPR operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -1189,7 +1189,7 @@ bool ROCMBlas::DoBlasSpr2(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory &y, int incy, DeviceMemory *ap) { LOG(ERROR) << "rocBLAS does not currently support the SPR2 operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -1198,7 +1198,7 @@ bool ROCMBlas::DoBlasSpr2(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory &y, int incy, DeviceMemory *ap) { LOG(ERROR) << "rocBLAS does not currently support the SPR2 operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -1207,7 +1207,7 @@ bool ROCMBlas::DoBlasSymv(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory &x, int incx, float beta, DeviceMemory *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the SYMV operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -1216,7 +1216,7 @@ bool ROCMBlas::DoBlasSymv(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory &x, int incx, double beta, DeviceMemory *y, int incy) { LOG(ERROR) << "rocBLAS does not currently support the SYMV operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -1243,7 +1243,7 @@ bool ROCMBlas::DoBlasSyr2(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory &y, int incy, DeviceMemory *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the SYR2 operation " - << "for the \"float\" 
dataype"; + << "for the \"float\" datatype"; return false; } @@ -1252,7 +1252,7 @@ bool ROCMBlas::DoBlasSyr2(Stream *stream, blas::UpperLower uplo, uint64 n, const DeviceMemory &y, int incy, DeviceMemory *a, int lda) { LOG(ERROR) << "rocBLAS does not currently support the SYR2 operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -1261,7 +1261,7 @@ bool ROCMBlas::DoBlasTbmv(Stream *stream, blas::UpperLower uplo, uint64 k, const DeviceMemory &a, int lda, DeviceMemory *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TBMV operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -1270,7 +1270,7 @@ bool ROCMBlas::DoBlasTbmv(Stream *stream, blas::UpperLower uplo, uint64 k, const DeviceMemory &a, int lda, DeviceMemory *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TBMV operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -1280,7 +1280,7 @@ bool ROCMBlas::DoBlasTbmv(Stream *stream, blas::UpperLower uplo, int lda, DeviceMemory> *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TBMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1290,7 +1290,7 @@ bool ROCMBlas::DoBlasTbmv(Stream *stream, blas::UpperLower uplo, int lda, DeviceMemory> *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TBMV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1299,7 +1299,7 @@ bool ROCMBlas::DoBlasTbsv(Stream *stream, blas::UpperLower uplo, uint64 k, const DeviceMemory &a, int lda, DeviceMemory *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TBSV operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -1308,7 +1308,7 @@ bool ROCMBlas::DoBlasTbsv(Stream *stream, blas::UpperLower uplo, uint64 k, const DeviceMemory &a, int lda, DeviceMemory *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TBSV operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -1318,7 +1318,7 @@ bool ROCMBlas::DoBlasTbsv(Stream *stream, blas::UpperLower uplo, int lda, DeviceMemory> *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TBSV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1328,7 +1328,7 @@ bool ROCMBlas::DoBlasTbsv(Stream *stream, blas::UpperLower uplo, int lda, DeviceMemory> *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TBSV operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -1337,7 +1337,7 @@ bool ROCMBlas::DoBlasTpmv(Stream *stream, blas::UpperLower uplo, const DeviceMemory &ap, DeviceMemory *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TPMV operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -1346,7 +1346,7 @@ bool ROCMBlas::DoBlasTpmv(Stream *stream, blas::UpperLower uplo, const DeviceMemory &ap, DeviceMemory *x, int incx) { LOG(ERROR) << "rocBLAS does not currently support the TPMV operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -1355,7 +1355,7 @@ bool ROCMBlas::DoBlasTpmv(Stream *stream, blas::UpperLower uplo, const DeviceMemory> &ap, DeviceMemory> *x, int incx) { LOG(ERROR) << "rocBLAS does not 
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1364,7 +1364,7 @@ bool ROCMBlas::DoBlasTpmv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<std::complex<double>> &ap,
                          DeviceMemory<std::complex<double>> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TPMV operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1373,7 +1373,7 @@ bool ROCMBlas::DoBlasTpsv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<float> &ap, DeviceMemory<float> *x,
                          int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TPSV operation "
-             << "for the \"float\" dataype";
+             << "for the \"float\" datatype";
   return false;
 }
@@ -1382,7 +1382,7 @@ bool ROCMBlas::DoBlasTpsv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<double> &ap,
                          DeviceMemory<double> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TPSV operation "
-             << "for the \"double\" dataype";
+             << "for the \"double\" datatype";
   return false;
 }
@@ -1391,7 +1391,7 @@ bool ROCMBlas::DoBlasTpsv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<std::complex<float>> &ap,
                          DeviceMemory<std::complex<float>> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TPSV operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1400,7 +1400,7 @@ bool ROCMBlas::DoBlasTpsv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<std::complex<double>> &ap,
                          DeviceMemory<std::complex<double>> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TPSV operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1409,7 +1409,7 @@ bool ROCMBlas::DoBlasTrmv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<float> &a, int lda,
                          DeviceMemory<float> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TRMV operation "
-             << "for the \"float\" dataype";
+             << "for the \"float\" datatype";
   return false;
 }
@@ -1418,7 +1418,7 @@ bool ROCMBlas::DoBlasTrmv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<double> &a, int lda,
                          DeviceMemory<double> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TRMV operation "
-             << "for the \"double\" dataype";
+             << "for the \"double\" datatype";
   return false;
 }
@@ -1427,7 +1427,7 @@ bool ROCMBlas::DoBlasTrmv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<std::complex<float>> &a, int lda,
                          DeviceMemory<std::complex<float>> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TRMV operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1436,7 +1436,7 @@ bool ROCMBlas::DoBlasTrmv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<std::complex<double>> &a, int lda,
                          DeviceMemory<std::complex<double>> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TRMV operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1445,7 +1445,7 @@ bool ROCMBlas::DoBlasTrsv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<float> &a, int lda,
                          DeviceMemory<float> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TRSV operation "
-             << "for the \"float\" dataype";
+             << "for the \"float\" datatype";
   return false;
 }
@@ -1454,7 +1454,7 @@ bool ROCMBlas::DoBlasTrsv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<double> &a, int lda,
                          DeviceMemory<double> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TRSV operation "
-             << "for the \"double\" dataype";
+             << "for the \"double\" datatype";
   return false;
 }
@@ -1463,7 +1463,7 @@ bool ROCMBlas::DoBlasTrsv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<std::complex<float>> &a, int lda,
                          DeviceMemory<std::complex<float>> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TRSV operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1472,7 +1472,7 @@ bool ROCMBlas::DoBlasTrsv(Stream *stream, blas::UpperLower uplo,
                          const DeviceMemory<std::complex<double>> &a, int lda,
                          DeviceMemory<std::complex<double>> *x, int incx) {
   LOG(ERROR) << "rocBLAS does not currently support the TRSV operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1579,7 +1579,7 @@ bool ROCMBlas::DoBlasGemm(Stream *stream, blas::Transpose transa,
                          std::complex<float> beta,
                          DeviceMemory<std::complex<float>> *c, int ldc) {
   LOG(ERROR) << "rocBLAS does not currently support the GEMM operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1591,7 +1591,7 @@ bool ROCMBlas::DoBlasGemm(Stream *stream, blas::Transpose transa,
                          std::complex<double> beta,
                          DeviceMemory<std::complex<double>> *c, int ldc) {
   LOG(ERROR) << "rocBLAS does not currently support the GEMM operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -1739,7 +1739,7 @@ bool ROCMBlas::DoBlasGemmWithAlgorithm(
     blas::ProfileResult *output_profile_result) {
   LOG(ERROR)
       << "rocBLAS does not currently support the GEMMwithAlgorithm operation "
-      << "for the \"int8\" dataype";
+      << "for the \"int8\" datatype";
   return false;
 }
@@ -1753,7 +1753,7 @@ bool ROCMBlas::DoBlasGemmWithAlgorithm(
     blas::AlgorithmType algorithm, blas::ProfileResult *output_profile_result) {
   LOG(ERROR)
       << "rocBLAS does not currently support the GEMMwithAlgorithm operation "
-      << "for the \"half\" dataype";
+      << "for the \"half\" datatype";
   return false;
 }
@@ -1766,7 +1766,7 @@ bool ROCMBlas::DoBlasGemmWithAlgorithm(
    blas::AlgorithmType algorithm, blas::ProfileResult *output_profile_result) {
   LOG(ERROR)
       << "rocBLAS does not currently support the GEMMwithAlgorithm operation "
-      << "for the \"float\" dataype";
+      << "for the \"float\" datatype";
   return false;
 }
@@ -1779,7 +1779,7 @@ bool ROCMBlas::DoBlasGemmWithAlgorithm(
    blas::AlgorithmType algorithm, blas::ProfileResult *output_profile_result) {
   LOG(ERROR)
       << "rocBLAS does not currently support the GEMMwithAlgorithm operation "
-      << "for the \"double\" dataype";
+      << "for the \"double\" datatype";
   return false;
 }
@@ -1794,7 +1794,7 @@ bool ROCMBlas::DoBlasGemmWithAlgorithm(
     blas::ProfileResult *output_profile_result) {
   LOG(ERROR)
       << "rocBLAS does not currently support the GEMMwithAlgorithm operation "
-      << "for the \"complex\" dataype";
+      << "for the \"complex\" datatype";
   return false;
 }
@@ -1809,7 +1809,7 @@ bool ROCMBlas::DoBlasGemmWithAlgorithm(
     blas::ProfileResult *output_profile_result) {
   LOG(ERROR)
       << "rocBLAS does not currently support the GEMMwithAlgorithm operation "
-      << "for the \"complex\" dataype";
+      << "for the \"complex\" datatype";
   return false;
 }
@@ -1909,7 +1909,7 @@ port::Status ROCMBlas::DoBlasGemmBatchedInternal(
     batch_stride_b = ldb * k;
   }
 
-  // Alocate local vectors to hold device pointers to matrices
+  // Allocate local vectors to hold device pointers to matrices
   std::vector<MAPPED_T *> a_raw_ptrs, b_raw_ptrs, c_raw_ptrs;
   for (int i = 0; i < batch_count; ++i) {
     // static_cast does work when converting Eigen::half* to rocblas_half*,
@@ -2033,7 +2033,7 @@ bool ROCMBlas::DoBlasGemmBatched(
     const port::ArraySlice<DeviceMemory<std::complex<float>> *> &c_array,
     int ldc, int batch_count, ScratchAllocator *scratch_allocator) {
   LOG(ERROR) << "rocBLAS does not currently support the GEMMBatched operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
the \"complex\" datatype"; return false; } @@ -2047,7 +2047,7 @@ bool ROCMBlas::DoBlasGemmBatched( const port::ArraySlice> *> &c_array, int ldc, int batch_count, ScratchAllocator *scratch_allocator) { LOG(ERROR) << "rocBLAS does not currently support the GEMMBatched operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2059,7 +2059,7 @@ bool ROCMBlas::DoBlasHemm(Stream *stream, blas::Side side, std::complex beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the HEMM operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2071,7 +2071,7 @@ bool ROCMBlas::DoBlasHemm(Stream *stream, blas::Side side, std::complex beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the HEMM operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2082,7 +2082,7 @@ bool ROCMBlas::DoBlasHerk(Stream *stream, blas::UpperLower uplo, float beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the HERK operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2093,7 +2093,7 @@ bool ROCMBlas::DoBlasHerk(Stream *stream, blas::UpperLower uplo, double beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the HERK operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2105,7 +2105,7 @@ bool ROCMBlas::DoBlasHer2k(Stream *stream, blas::UpperLower uplo, float beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the HER2K operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2117,7 +2117,7 @@ bool ROCMBlas::DoBlasHer2k(Stream *stream, blas::UpperLower uplo, double beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the HER2K operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2127,7 +2127,7 @@ bool ROCMBlas::DoBlasSymm(Stream *stream, blas::Side side, const DeviceMemory &b, int ldb, float beta, DeviceMemory *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYMM operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -2137,7 +2137,7 @@ bool ROCMBlas::DoBlasSymm(Stream *stream, blas::Side side, const DeviceMemory &b, int ldb, double beta, DeviceMemory *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYMM operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -2149,7 +2149,7 @@ bool ROCMBlas::DoBlasSymm(Stream *stream, blas::Side side, std::complex beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYMM operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2161,7 +2161,7 @@ bool ROCMBlas::DoBlasSymm(Stream *stream, blas::Side side, std::complex beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYMM operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2170,7 +2170,7 @@ bool ROCMBlas::DoBlasSyrk(Stream *stream, blas::UpperLower uplo, float alpha, const DeviceMemory &a, int lda, float beta, DeviceMemory *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYRK operation " - << 
"for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -2179,7 +2179,7 @@ bool ROCMBlas::DoBlasSyrk(Stream *stream, blas::UpperLower uplo, double alpha, const DeviceMemory &a, int lda, double beta, DeviceMemory *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYRK operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -2190,7 +2190,7 @@ bool ROCMBlas::DoBlasSyrk(Stream *stream, blas::UpperLower uplo, std::complex beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYRK operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2201,7 +2201,7 @@ bool ROCMBlas::DoBlasSyrk(Stream *stream, blas::UpperLower uplo, std::complex beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYRK operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2211,7 +2211,7 @@ bool ROCMBlas::DoBlasSyr2k(Stream *stream, blas::UpperLower uplo, const DeviceMemory &b, int ldb, float beta, DeviceMemory *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYR2K operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -2221,7 +2221,7 @@ bool ROCMBlas::DoBlasSyr2k(Stream *stream, blas::UpperLower uplo, const DeviceMemory &b, int ldb, double beta, DeviceMemory *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYR2K operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -2233,7 +2233,7 @@ bool ROCMBlas::DoBlasSyr2k(Stream *stream, blas::UpperLower uplo, std::complex beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYR2K operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2245,7 +2245,7 @@ bool ROCMBlas::DoBlasSyr2k(Stream *stream, blas::UpperLower uplo, std::complex beta, DeviceMemory> *c, int ldc) { LOG(ERROR) << "rocBLAS does not currently support the SYR2K operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2255,7 +2255,7 @@ bool ROCMBlas::DoBlasTrmm(Stream *stream, blas::Side side, const DeviceMemory &a, int lda, DeviceMemory *b, int ldb) { LOG(ERROR) << "rocBLAS does not currently support the TRMM operation " - << "for the \"float\" dataype"; + << "for the \"float\" datatype"; return false; } @@ -2265,7 +2265,7 @@ bool ROCMBlas::DoBlasTrmm(Stream *stream, blas::Side side, const DeviceMemory &a, int lda, DeviceMemory *b, int ldb) { LOG(ERROR) << "rocBLAS does not currently support the TRMM operation " - << "for the \"double\" dataype"; + << "for the \"double\" datatype"; return false; } @@ -2276,7 +2276,7 @@ bool ROCMBlas::DoBlasTrmm(Stream *stream, blas::Side side, const DeviceMemory> &a, int lda, DeviceMemory> *b, int ldb) { LOG(ERROR) << "rocBLAS does not currently support the TRMM operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2287,7 +2287,7 @@ bool ROCMBlas::DoBlasTrmm(Stream *stream, blas::Side side, const DeviceMemory> &a, int lda, DeviceMemory> *b, int ldb) { LOG(ERROR) << "rocBLAS does not currently support the TRMM operation " - << "for the \"complex\" dataype"; + << "for the \"complex\" datatype"; return false; } @@ -2322,7 +2322,7 @@ bool ROCMBlas::DoBlasTrsm(Stream *stream, blas::Side side, const DeviceMemory> &a, int lda, 
                          DeviceMemory<std::complex<float>> *b, int ldb) {
   LOG(ERROR) << "rocBLAS does not currently support the TRSM operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
@@ -2333,7 +2333,7 @@ bool ROCMBlas::DoBlasTrsm(Stream *stream, blas::Side side,
                          const DeviceMemory<std::complex<double>> &a, int lda,
                          DeviceMemory<std::complex<double>> *b, int ldb) {
   LOG(ERROR) << "rocBLAS does not currently support the TRSM operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
 bool ROCMBlas::DoBlasGemmStridedBatched(
@@ -2392,7 +2392,7 @@ bool ROCMBlas::DoBlasGemmStridedBatched(
     int64 stride_c, int batch_count) {
   LOG(ERROR) << "rocBLAS does not currently support the "
                 "DoBlasGemmStridedBatched operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
 bool ROCMBlas::DoBlasGemmStridedBatched(
@@ -2404,7 +2404,7 @@ bool ROCMBlas::DoBlasGemmStridedBatched(
     int64 stride_c, int batch_count) {
   LOG(ERROR) << "rocBLAS does not currently support the "
                 "DoBlasGemmStridedBatched operation "
-             << "for the \"complex\" dataype";
+             << "for the \"complex\" datatype";
   return false;
 }
diff --git a/tensorflow/stream_executor/rocm/rocm_blas.h b/tensorflow/stream_executor/rocm/rocm_blas.h
index 1b73a356b88..0497b917c95 100644
--- a/tensorflow/stream_executor/rocm/rocm_blas.h
+++ b/tensorflow/stream_executor/rocm/rocm_blas.h
@@ -110,7 +110,7 @@ class ROCMBlas : public blas::BlasSupport {
                            /*err_on_failure=*/false, args...);
   }
 
-  // A helper allocation funciton to convert raw pointers memory layout to
+  // A helper allocation function to convert raw pointers memory layout to
   // strided flavor
   template <typename T>
   port::Status AllocateStridedBuffer(
diff --git a/tensorflow/stream_executor/rocm/rocm_dnn.cc b/tensorflow/stream_executor/rocm/rocm_dnn.cc
index 9a6ecfe70bd..8df92357e9b 100644
--- a/tensorflow/stream_executor/rocm/rocm_dnn.cc
+++ b/tensorflow/stream_executor/rocm/rocm_dnn.cc
@@ -2633,7 +2633,7 @@ void* MIOpenAllocatorCallback(void* ctx, size_t size_in_bytes) {
 }

 void MIOpenDeallocatorCallback(void* ctx, void* mem) {
-  // Don't need dealloactor since the TensorFlow heap will automatically reclaim
+  // Don't need deallocator since the TensorFlow heap will automatically reclaim
   // the memory
 }
@@ -3910,7 +3910,7 @@ bool MIOpenSupport::DoPoolBackward(
       return false;
     }
   } else {
-    LOG(ERROR) << "Failed to calcuate tensor size to chain forward and "
+    LOG(ERROR) << "Failed to calculate tensor size to chain forward and "
                   "backward pooling";
   }
@@ -4006,7 +4006,7 @@ bool MIOpenSupport::DoPoolBackward(
       return false;
     }
   } else {
-    LOG(ERROR) << "Failed to calcuate tensor size to chain forward and "
+    LOG(ERROR) << "Failed to calculate tensor size to chain forward and "
                   "backward pooling";
   }
@@ -4144,7 +4144,7 @@ bool MIOpenSupport::DoNormalizeBackwardWithDimensions(
     }
   } else {
     LOG(ERROR)
-        << "Failed to calcuate tensor size to chain forward and backward LRN";
+        << "Failed to calculate tensor size to chain forward and backward LRN";
   }

   status = wrap::miopenLRNForward(miopen.handle(), normalize.handle(), &alpha,
diff --git a/tensorflow/stream_executor/rocm/rocm_fft.cc b/tensorflow/stream_executor/rocm/rocm_fft.cc
index 82dce9ef354..362105ce6a0 100644
--- a/tensorflow/stream_executor/rocm/rocm_fft.cc
+++ b/tensorflow/stream_executor/rocm/rocm_fft.cc
@@ -298,14 +298,14 @@ port::Status ROCMFftPlan::Initialize(
       if (ret != HIPFFT_SUCCESS) {
         LOG(ERROR) << "failed to create rocFFT batched plan:" << ret;
         return port::Status{port::error::INTERNAL,
-                            "Failed to create rocFFT bacthed plan."};
+ "Failed to create rocFFT batched plan."}; } } else { auto ret = wrap::hipfftCreate(parent, &plan_); if (ret != HIPFFT_SUCCESS) { LOG(ERROR) << "failed to create rocFFT batched plan:" << ret; return port::Status{port::error::INTERNAL, - "Failed to create rocFFT bacthed plan."}; + "Failed to create rocFFT batched plan."}; } ret = wrap::hipfftSetAutoAllocation(parent, plan_, 0); if (ret != HIPFFT_SUCCESS) { @@ -313,7 +313,7 @@ port::Status ROCMFftPlan::Initialize( << ret; return port::Status{ port::error::INTERNAL, - "Failed to set auto allocation for rocFFT bacthed plan."}; + "Failed to set auto allocation for rocFFT batched plan."}; } size_t size_in_bytes; ret = wrap::hipfftMakePlanMany( @@ -324,7 +324,7 @@ port::Status ROCMFftPlan::Initialize( if (ret != HIPFFT_SUCCESS) { LOG(ERROR) << "failed to make rocFFT batched plan:" << ret; return port::Status{port::error::INTERNAL, - "Failed to make rocFFT bacthed plan."}; + "Failed to make rocFFT batched plan."}; } if (size_in_bytes != 0) { auto allocated = scratch_allocator->AllocateBytes(size_in_bytes); @@ -338,7 +338,7 @@ port::Status ROCMFftPlan::Initialize( if (ret != HIPFFT_SUCCESS) { LOG(ERROR) << "failed to set work area for rocFFT batched plan:" << ret; return port::Status{port::error::INTERNAL, - "Failed to set work area for rocFFT bacthed plan."}; + "Failed to set work area for rocFFT batched plan."}; } } } diff --git a/tensorflow/stream_executor/scratch_allocator.h b/tensorflow/stream_executor/scratch_allocator.h index 29b4e5aa012..7ca4edc6902 100644 --- a/tensorflow/stream_executor/scratch_allocator.h +++ b/tensorflow/stream_executor/scratch_allocator.h @@ -31,7 +31,7 @@ class Stream; // buffers it has allocated at destruction. Returned memory pointers are not // owning. // -// Used by stream operations (e.g. Stream::ThenConvolveWithScratch) to optonally +// Used by stream operations (e.g. Stream::ThenConvolveWithScratch) to optionally // request scratch space to speed up the operation. class ScratchAllocator { public: diff --git a/tensorflow/stream_executor/stream_executor_pimpl.h b/tensorflow/stream_executor/stream_executor_pimpl.h index d69c309f9c4..0c5001c8b42 100644 --- a/tensorflow/stream_executor/stream_executor_pimpl.h +++ b/tensorflow/stream_executor/stream_executor_pimpl.h @@ -685,7 +685,7 @@ class StreamExecutor { std::unique_ptr rng_ GUARDED_BY(mu_); // Slot to cache the owned DeviceDescription for the underlying device - // once it has been quieried from DeviceDescription(). + // once it has been queried from DeviceDescription(). mutable std::unique_ptr device_description_ GUARDED_BY(mu_); diff --git a/tensorflow/tools/ci_build/Dockerfile.rbe.cuda10.0-cudnn7-centos6.sh b/tensorflow/tools/ci_build/Dockerfile.rbe.cuda10.0-cudnn7-centos6.sh index ca58747929f..aa324d1833a 100755 --- a/tensorflow/tools/ci_build/Dockerfile.rbe.cuda10.0-cudnn7-centos6.sh +++ b/tensorflow/tools/ci_build/Dockerfile.rbe.cuda10.0-cudnn7-centos6.sh @@ -15,7 +15,7 @@ # ============================================================================== # # Script to create a centos6 docker image. 
-# Before running, copy tensorrt into /tmp after downlading it from:
+# Before running, copy tensorrt into /tmp after downloading it from:
 # https://developer.nvidia.com/nvidia-tensorrt-5x-download
 #
 # TODO(klimek): once there are downloadable images for tensorrt for centos6
diff --git a/tensorflow/tools/ci_build/Dockerfile.rbe.cuda10.1-cudnn7-centos6.sh b/tensorflow/tools/ci_build/Dockerfile.rbe.cuda10.1-cudnn7-centos6.sh
index 32df0b863ee..d07e6a4da5f 100755
--- a/tensorflow/tools/ci_build/Dockerfile.rbe.cuda10.1-cudnn7-centos6.sh
+++ b/tensorflow/tools/ci_build/Dockerfile.rbe.cuda10.1-cudnn7-centos6.sh
@@ -15,7 +15,7 @@
 # ==============================================================================
 #
 # Script to create a centos6 docker image.
-# Before running, copy tensorrt into /tmp after downlading it from:
+# Before running, copy tensorrt into /tmp after downloading it from:
 # https://developer.nvidia.com/nvidia-tensorrt-5x-download
 #
 # TODO(klimek): once there are downloadable images for tensorrt for centos6
diff --git a/tensorflow/tools/ci_build/builds/docker_test.sh b/tensorflow/tools/ci_build/builds/docker_test.sh
index 39e119f8895..b2d1dbae433 100755
--- a/tensorflow/tools/ci_build/builds/docker_test.sh
+++ b/tensorflow/tools/ci_build/builds/docker_test.sh
@@ -75,7 +75,7 @@ fi
 BASE_DIR=$(upsearch "${DOCKERFILE}")
 if [[ -z "${BASE_DIR}" ]]; then
   die "FAILED: Unable to find the base directory where the dockerfile "\
-"${DOCKERFFILE} resides"
+"${DOCKERFILE} resides"
 fi

 echo "Base directory: ${BASE_DIR}"
diff --git a/tensorflow/tools/ci_build/builds/pip.sh b/tensorflow/tools/ci_build/builds/pip.sh
index 9f8f8da7106..d9f2a4df61a 100755
--- a/tensorflow/tools/ci_build/builds/pip.sh
+++ b/tensorflow/tools/ci_build/builds/pip.sh
@@ -30,7 +30,7 @@
 #
 # TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES overrides the default extra pip packages
 # to be installed in virtualenv before run_pip_tests.sh is called. Multiple
-# pakcage names are separated with spaces.
+# package names are separated with spaces.
 #
 # If NO_TEST_ON_INSTALL has any non-empty and non-0 value, the test-on-install
 # part will be skipped.
diff --git a/tensorflow/tools/ci_build/builds/pip_new.sh b/tensorflow/tools/ci_build/builds/pip_new.sh
index 79dbf9cb769..6a3c0788196 100755
--- a/tensorflow/tools/ci_build/builds/pip_new.sh
+++ b/tensorflow/tools/ci_build/builds/pip_new.sh
@@ -72,7 +72,7 @@
 #   GIT_TAG_OVERRIDE: Values for `--git_tag_override`. This flag gets passed
 #                     in as `--action_env` for bazel build and tests.
 #   TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES:
-#                     Additonal pip packages to be installed.
+#                     Additional pip packages to be installed.
 #                     Caveat: pip version needs to be checked prior.
 #
 # ==============================================================================
diff --git a/tensorflow/tools/ci_build/builds/test_user_ops.sh b/tensorflow/tools/ci_build/builds/test_user_ops.sh
index 9da9c3b881e..0fe5acfcd9a 100755
--- a/tensorflow/tools/ci_build/builds/test_user_ops.sh
+++ b/tensorflow/tools/ci_build/builds/test_user_ops.sh
@@ -196,7 +196,7 @@ else
         "/usr/local/cuda/lib and /usr/local/cuda/lib64"
   fi

-  echo "Found CUDA library diretory at: ${CUDA_LIB_DIR}"
+  echo "Found CUDA library directory at: ${CUDA_LIB_DIR}"
   echo ""

   # USER_OP_SO=$(basename $(echo "${OP_KERNEL_CC}" | sed -e 's/\.cc/\.so/'))
diff --git a/tensorflow/tools/ci_build/linux/cpu/run_mkl.sh b/tensorflow/tools/ci_build/linux/cpu/run_mkl.sh
index bf8688284d9..30ea2846d08 100755
--- a/tensorflow/tools/ci_build/linux/cpu/run_mkl.sh
+++ b/tensorflow/tools/ci_build/linux/cpu/run_mkl.sh
@@ -42,7 +42,7 @@ if [[ "$MODE" == "eigen" ]]; then
 else
     CONFIG="--config=mkl"
 # Setting OMP_THREADS for low performing benchmarks.
-# Default value(=core count) degrades perfrmance of some banchmark cases.
+# Default value(=core count) degrades performance of some benchmark cases.
 # Optimal thread count is case specific.
 # An argument can be passed to script, the value of which is used if given.
 # Otherwise OMP_NUM_THREADS is set to 10
diff --git a/tensorflow/tools/compatibility/all_renames_v2.py b/tensorflow/tools/compatibility/all_renames_v2.py
index c9edc3c9819..23962a85f72 100644
--- a/tensorflow/tools/compatibility/all_renames_v2.py
+++ b/tensorflow/tools/compatibility/all_renames_v2.py
@@ -612,7 +612,7 @@ addons_symbol_mappings = {
     "tf.contrib.image.angles_to_projective_transforms":
         "tfa.image.angles_to_projective_transforms",
     "tf.contrib.image.matrices_to_flat_transforms":
-        "tfa.image.matricies_to_flat_transforms",
+        "tfa.image.matrices_to_flat_transforms",
     "tf.contrib.image.rotate":
         "tfa.image.rotate",
     "tf.contrib.image.transform":
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2.py b/tensorflow/tools/compatibility/tf_upgrade_v2.py
index a8c507900cf..c7bbd3815f1 100644
--- a/tensorflow/tools/compatibility/tf_upgrade_v2.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2.py
@@ -1992,7 +1992,7 @@ def _pool_seed_transformer(parent, node, full_name, name, logs):
 def _extract_glimpse_transformer(parent, node, full_name, name, logs):

   def _replace_uniform_noise_node(parent, old_value):
-    """Replaces old_value with 'uniform' or 'guassian'."""
+    """Replaces old_value with 'uniform' or 'gaussian'."""
     uniform = ast.Str(s="uniform")
     gaussian = ast.Str(s="gaussian")
     new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian)
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2_test.py b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
index 92a4c0bedb7..d645b298ce3 100644
--- a/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
@@ -449,7 +449,7 @@ bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
       _, _, _, new_text = self._upgrade(text)
       self.assertEqual("tf.compat.v1." + ns_prefix + v + "(a, b)", new_text)
-  def testIntializers(self):
+  def testInitializers(self):
     initializers = [
         "zeros",
         "ones",
diff --git a/tensorflow/tools/docs/doc_controls.py b/tensorflow/tools/docs/doc_controls.py
index 27a1d2075e9..e66a1e52138 100644
--- a/tensorflow/tools/docs/doc_controls.py
+++ b/tensorflow/tools/docs/doc_controls.py
@@ -135,7 +135,7 @@ def do_not_doc_inheritable(obj):
     # method2
   ```

-  When generating docs for a class's arributes, the `__mro__` is searched and
+  When generating docs for a class's attributes, the `__mro__` is searched and
   the attribute will be skipped if this decorator is detected on the attribute
   on any class in the `__mro__`.
@@ -178,7 +178,7 @@ def for_subclass_implementers(obj):

   Works on method, or other class-attributes.

-  When generating docs for a class's arributes, the `__mro__` is searched and
+  When generating docs for a class's attributes, the `__mro__` is searched and
   the attribute will be skipped if this decorator is detected on the attribute
   on any **parent** class in the `__mro__`.
diff --git a/tensorflow/tools/docs/doc_generator_visitor.py b/tensorflow/tools/docs/doc_generator_visitor.py
index ec2102a5935..b409566d3f7 100644
--- a/tensorflow/tools/docs/doc_generator_visitor.py
+++ b/tensorflow/tools/docs/doc_generator_visitor.py
@@ -166,7 +166,7 @@ class DocGeneratorVisitor(object):
     This function is meant to be used as the `key` to the `sorted` function.

     This sorting in order:
-      Prefers names refering to the defining class, over a subclass.
+      Prefers names referring to the defining class, over a subclass.
       Prefers names that are not in "contrib".
       prefers submodules to the root namespace.
       Prefers short names `tf.thing` over `tf.a.b.c.thing`
diff --git a/tensorflow/tools/docs/parser.py b/tensorflow/tools/docs/parser.py
index 61518bcbd46..994d5d4be9b 100644
--- a/tensorflow/tools/docs/parser.py
+++ b/tensorflow/tools/docs/parser.py
@@ -46,7 +46,7 @@ def is_free_function(py_object, full_name, index):
     index: The {full_name:py_object} dictionary for the public API.

   Returns:
-    True if the obeject is a stand-alone function, and not part of a class
+    True if the object is a stand-alone function, and not part of a class
     definition.
   """
   if not tf_inspect.isfunction(py_object):
@@ -235,7 +235,7 @@ class ReferenceResolver(object):
     return cls(doc_index=doc_index, **json_dict)

   def to_json_file(self, filepath):
-    """Converts the RefenceResolver to json and writes it to the specified file.
+    """Converts the ReferenceResolver to json and writes it to the specified file.

     Args:
       filepath: The file path to write the json to.
diff --git a/tensorflow/tools/docs/parser_test.py b/tensorflow/tools/docs/parser_test.py
index 15d4cad89cc..b5a06cab26c 100644
--- a/tensorflow/tools/docs/parser_test.py
+++ b/tensorflow/tools/docs/parser_test.py
@@ -32,7 +32,7 @@ from tensorflow.tools.docs import doc_controls
 from tensorflow.tools.docs import parser

 # The test needs a real module. `types.ModuleType()` doesn't work, as the result
-# is a `builtin` module. Using "parser" here is arbitraty. The tests don't
+# is a `builtin` module. Using "parser" here is arbitrary. The tests don't
 # depend on the module contents. At this point in the process the public api
 # has already been extracted.
 test_module = parser
diff --git a/tensorflow/tools/docs/pretty_docs.py b/tensorflow/tools/docs/pretty_docs.py
index 98b5c7a3b39..946c800def5 100644
--- a/tensorflow/tools/docs/pretty_docs.py
+++ b/tensorflow/tools/docs/pretty_docs.py
@@ -18,7 +18,7 @@
 The adjacent `parser` module creates `PageInfo` objects, containing all data
 necessary to document an element of the TensorFlow API.

-This module contains one public function, which handels the conversion of these
+This module contains one public function, which handles the conversion of these
 `PageInfo` objects into a markdown string:

   md_page = build_md_page(page_info)
diff --git a/tensorflow/tools/graph_transforms/remove_control_dependencies.cc b/tensorflow/tools/graph_transforms/remove_control_dependencies.cc
index cba6b78fc5c..4a7285f1d47 100644
--- a/tensorflow/tools/graph_transforms/remove_control_dependencies.cc
+++ b/tensorflow/tools/graph_transforms/remove_control_dependencies.cc
@@ -19,7 +19,7 @@ limitations under the License.
 namespace tensorflow {
 namespace graph_transforms {

-// Remove control depdencies in preparation for inference.
+// Remove control dependencies in preparation for inference.
 // In the tensorflow graph, control dependencies are represented as extra
 // inputs which are referenced with "^tensor_name".
 // See node_def.proto for more details.
diff --git a/tensorflow/tools/graph_transforms/transform_utils.cc b/tensorflow/tools/graph_transforms/transform_utils.cc
index ccaf77868a4..85b07756b81 100644
--- a/tensorflow/tools/graph_transforms/transform_utils.cc
+++ b/tensorflow/tools/graph_transforms/transform_utils.cc
@@ -596,7 +596,7 @@ Status GetInOutTypes(const NodeDef& node_def, DataTypeVector* inputs,

 Status TensorShapeFromString(const string& shape_string, TensorShape* result) {
   if (shape_string.empty()) {
-    return errors::InvalidArgument("Specificed shape is empty.");
+    return errors::InvalidArgument("Specified shape is empty.");
   }
   std::vector<string> dims_as_str = str_util::Split(shape_string, ",");
   std::vector<int64> dims;
diff --git a/tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc b/tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc
index e67add72de6..402da3ca2eb 100644
--- a/tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc
+++ b/tensorflow/tools/proto_text/gen_proto_text_functions_lib_test.cc
@@ -456,7 +456,7 @@ TEST(CreateProtoDebugStringLibTest, Enums) {
   EXPECT_PARSE_SUCCESS("", "optional_nested_enum: -0");

   // TODO(amauryfa): restore the line below when protobuf::TextFormat also
-  // supports unknonwn enum values.
+  // supports unknown enum values.
   // EXPECT_PARSE_SUCCESS("optional_nested_enum: 6", "optional_nested_enum: 6");
   EXPECT_PARSE_FAILURE("optional_nested_enum: 2147483648");  // > INT32_MAX
   EXPECT_PARSE_FAILURE("optional_nested_enum: BARNONE");
diff --git a/tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py b/tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py
index ec8a0ba6f96..56f5507c5c6 100644
--- a/tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py
+++ b/tensorflow/tools/tensorflow_builder/compat_checker/compat_checker.py
@@ -117,7 +117,7 @@ def _get_func_name():

 class ConfigCompatChecker(object):
-  """Class that checks configuration versions and depencency compatibilities.
+  """Class that checks configuration versions and dependency compatibilities.

   `ConfigCompatChecker` checks a given set of configurations and their versions
   against supported versions and dependency rules defined in `.ini` config file.
@@ -180,7 +180,7 @@ class ConfigCompatChecker(object):
     """Prints a requirement and its components.

     Returns:
-      String that has concantenated information about a requirement.
+      String that has concatenated information about a requirement.
     """
     info = {
         "section": self._section,
@@ -200,7 +200,7 @@ class ConfigCompatChecker(object):
       req_str += "Range: {range}\n"
       req_str += "Exclude: {exclude}\n"
       req_str += "Include: {include}\n"
-      req_str += "Initilalized: {init}\n\n"
+      req_str += "Initialized: {init}\n\n"

     return req_str.format(**info)
@@ -214,7 +214,7 @@ class ConfigCompatChecker(object):
         [1] String that includes `range` indicating range syntax for defining
             a requirement.
             e.g. `range(1.0, 2.0) include(3.0) exclude(1.5)`
-        [2] List that includes inidividual supported versions or items.
+        [2] List that includes individual supported versions or items.
             e.g. [`1.0`, `3.0`, `7.1`]

     For a list type requirement, it directly stores the list to
@@ -380,7 +380,7 @@ class ConfigCompatChecker(object):
     parser.read(self.req_file)

     if not parser.sections():
-      err_msg = "[Error] Empty confie file. "
+      err_msg = "[Error] Empty config file. "
      err_msg += "(file = %s, " % str(self.req_file)
       err_msg += "parser sectons = %s)" % str(parser.sections())
       self.error_msg.append(err_msg)
@@ -427,7 +427,7 @@ class ConfigCompatChecker(object):
           self.warning_msg.append(warn_msg)

         # Last dependency item may only or not have `]` depending
-        # on the identation style in the config (.ini) file.
+        # on the indentation style in the config (.ini) file.
         # If it has `[`, then either skip or remove from string.
         if spec_split[-1] == "]":
           spec_split = spec_split[:-1]
diff --git a/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py b/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py
index 090e3172c34..323adf368dd 100755
--- a/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py
+++ b/tensorflow/tools/tensorflow_builder/config_detector/config_detector.py
@@ -327,7 +327,7 @@ def get_cuda_version_all():

 def get_cuda_version_default():
   """Retrieves default CUDA version.

-  Default verion is the version found in `/usr/local/cuda/` installation.
+  Default version is the version found in `/usr/local/cuda/` installation.

   stderr is silenced by default. Setting FLAGS.debug mode will not enable it.
   Remove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable
diff --git a/third_party/clang_toolchain/cc_configure_clang.bzl b/third_party/clang_toolchain/cc_configure_clang.bzl
index 0778c43c53a..a6b87ab6971 100644
--- a/third_party/clang_toolchain/cc_configure_clang.bzl
+++ b/third_party/clang_toolchain/cc_configure_clang.bzl
@@ -15,8 +15,8 @@ def _cc_clang_autoconf(repo_ctx):
         return

     download_clang(repo_ctx, out_folder = "extra_tools")
-    overriden_tools = {"gcc": "extra_tools/bin/clang"}
-    cc_autoconf_impl(repo_ctx, overriden_tools)
+    overridden_tools = {"gcc": "extra_tools/bin/clang"}
+    cc_autoconf_impl(repo_ctx, overridden_tools)

 cc_download_clang_toolchain = repository_rule(
     environ = [
diff --git a/third_party/flatbuffers/build_defs.bzl b/third_party/flatbuffers/build_defs.bzl
index 45f1d197359..11d3caa0299 100644
--- a/third_party/flatbuffers/build_defs.bzl
+++ b/third_party/flatbuffers/build_defs.bzl
@@ -17,7 +17,7 @@ def flatbuffer_library_public(
         include_paths = [],
         flatc_args = DEFAULT_FLATC_ARGS,
         reflection_name = "",
-        reflection_visiblity = None,
+        reflection_visibility = None,
         output_to_bindir = False):
     """Generates code files for reading/writing the given flatbuffers in the
     requested language using the public compiler.
@@ -101,7 +101,7 @@ def flatbuffer_library_public(
         #     entries = [
         #         native.FilesetEntry(files = reflection_outs),
         #     ],
-        #     visibility = reflection_visiblity,
+        #     visibility = reflection_visibility,
         # )

 def flatbuffer_cc_library(
@@ -191,7 +191,7 @@ def flatbuffer_cc_library(
         include_paths = include_paths,
         flatc_args = flatc_args,
         reflection_name = reflection_name,
-        reflection_visiblity = visibility,
+        reflection_visibility = visibility,
     )
     native.cc_library(
         name = name,
diff --git a/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.py.tpl b/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.py.tpl
index f06357db935..46e8aef3606 100644
--- a/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.py.tpl
+++ b/third_party/gpus/crosstool/windows/msvc_wrapper_for_nvcc.py.tpl
@@ -117,7 +117,7 @@ def InvokeNvcc(argv, log=False):

   out_file = [ f for f in argv if f.startswith('/Fo') ]
   if len(out_file) != 1:
-    raise Error('Please sepecify exactly one output file for cuda compilation.')
+    raise Error('Please specify exactly one output file for cuda compilation.')
   out = ['-o', out_file[0][len('/Fo'):]]

   nvcc_compiler_options, argv = GetNvccOptions(argv)
@@ -136,7 +136,7 @@ def InvokeNvcc(argv, log=False):
   undefines, argv = GetOptionValue(argv, 'U')
   undefines = ['-U' + define for define in undefines]

-  # The rest of the unrecongized options should be passed to host compiler
+  # The rest of the unrecognized options should be passed to host compiler
   host_compiler_options = [option for option in argv
                            if option not in (src_files + out_file)]

   m_options = ["-m64"]
diff --git a/third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py b/third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
index 3c25c7a49d5..69fb0713d78 100755
--- a/third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
+++ b/third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
@@ -110,7 +110,7 @@ def InvokeNvcc(argv, log=False):

   out_file = [ f for f in argv if f.startswith('/Fo') ]
   if len(out_file) != 1:
-    raise Error('Please sepecify exactly one output file for cuda compilation.')
+    raise Error('Please specify exactly one output file for cuda compilation.')
   out = ['-o', out_file[0][len('/Fo'):]]

   nvcc_compiler_options, argv = GetNvccOptions(argv)
@@ -129,7 +129,7 @@ def InvokeNvcc(argv, log=False):
   undefines, argv = GetOptionValue(argv, 'U')
   undefines = ['-U' + define for define in undefines]

-  # The rest of the unrecongized options should be passed to host compiler
+  # The rest of the unrecognized options should be passed to host compiler
   host_compiler_options = [option for option in argv
                            if option not in (src_files + out_file)]

   m_options = ["-m64"]
diff --git a/third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.1/windows/msvc_wrapper_for_nvcc.py b/third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.1/windows/msvc_wrapper_for_nvcc.py
index e0f3224bf0c..404b8e24434 100755
--- a/third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.1/windows/msvc_wrapper_for_nvcc.py
+++ b/third_party/toolchains/preconfig/centos6/gcc7-nvcc-cuda10.1/windows/msvc_wrapper_for_nvcc.py
@@ -114,7 +114,7 @@ def InvokeNvcc(argv, log=False):

   out_file = [f for f in argv if f.startswith('/Fo')]
   if len(out_file) != 1:
-    raise RuntimeError('Please sepecify exactly one output file for cuda compilation.')
+    raise RuntimeError('Please specify exactly one output file for cuda compilation.')
   out = ['-o', out_file[0][len('/Fo'):]]

   nvcc_compiler_options, argv = GetNvccOptions(argv)
@@ -133,7 +133,7 @@ def InvokeNvcc(argv, log=False):
   undefines, argv = GetOptionValue(argv, 'U')
   undefines = ['-U' + define for define in undefines]

-  # The rest of the unrecongized options should be passed to host compiler
+  # The rest of the unrecognized options should be passed to host compiler
   host_compiler_options = [
       option for option in argv if option not in (src_files + out_file)
   ]
diff --git a/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py b/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
index 510ba52fd5e..72354b133a9 100755
--- a/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
+++ b/third_party/toolchains/preconfig/ubuntu14.04/gcc-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
@@ -110,7 +110,7 @@ def InvokeNvcc(argv, log=False):

   out_file = [ f for f in argv if f.startswith('/Fo') ]
   if len(out_file) != 1:
-    raise Error('Please sepecify exactly one output file for cuda compilation.')
+    raise Error('Please specify exactly one output file for cuda compilation.')
   out = ['-o', out_file[0][len('/Fo'):]]

   nvcc_compiler_options, argv = GetNvccOptions(argv)
@@ -129,7 +129,7 @@ def InvokeNvcc(argv, log=False):
   undefines, argv = GetOptionValue(argv, 'U')
   undefines = ['-U' + define for define in undefines]

-  # The rest of the unrecongized options should be passed to host compiler
+  # The rest of the unrecognized options should be passed to host compiler
   host_compiler_options = [option for option in argv
                            if option not in (src_files + out_file)]

   m_options = ["-m64"]
diff --git a/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py b/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
index 0cf26b24ff7..8602d15d85c 100755
--- a/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
+++ b/third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0/windows/msvc_wrapper_for_nvcc.py
@@ -117,7 +117,7 @@ def InvokeNvcc(argv, log=False):

   out_file = [ f for f in argv if f.startswith('/Fo') ]
   if len(out_file) != 1:
-    raise Error('Please sepecify exactly one output file for cuda compilation.')
+    raise Error('Please specify exactly one output file for cuda compilation.')
   out = ['-o', out_file[0][len('/Fo'):]]

   nvcc_compiler_options, argv = GetNvccOptions(argv)
@@ -136,7 +136,7 @@ def InvokeNvcc(argv, log=False):
   undefines, argv = GetOptionValue(argv, 'U')
   undefines = ['-U' + define for define in undefines]

-  # The rest of the unrecongized options should be passed to host compiler
+  # The rest of the unrecognized options should be passed to host compiler
   host_compiler_options = [option for option in argv
                            if option not in (src_files + out_file)]

   m_options = ["-m64"]