diff --git a/tensorflow/core/framework/node_def_builder_test.cc b/tensorflow/core/framework/node_def_builder_test.cc
index cc583df348b..03b4456393f 100644
--- a/tensorflow/core/framework/node_def_builder_test.cc
+++ b/tensorflow/core/framework/node_def_builder_test.cc
@@ -235,7 +235,7 @@ TEST_F(NodeDefBuilderTest, Polymorphic) {
       op: "Polymorphic" input: "a"
       attr { key: "T" value { type: DT_BOOL } } )proto");
 
-  // Conficting Attr()
+  // Conflicting Attr()
   ExpectFailure(Builder().Input(FakeInput(DT_BOOL)).Attr("T", DT_STRING),
                 "Inconsistent values for attr 'T' DT_BOOL vs. DT_STRING while");
 
diff --git a/tensorflow/core/graph/collective_order.cc b/tensorflow/core/graph/collective_order.cc
index e835259d64e..80750319cb2 100644
--- a/tensorflow/core/graph/collective_order.cc
+++ b/tensorflow/core/graph/collective_order.cc
@@ -143,7 +143,7 @@ Status CreateControlDependencies(
 
 // Insert control dependencies defined by `dependency_edges` in `graph`. If
 // `order_type` is `kEdges`, insert explicit control edges, else if `order_type`
-// is `kAttrs`, encode depdencies as an attribute on collective node.
+// is `kAttrs`, encode dependencies as an attribute on collective node.
 Status InsertControlDependencies(
     Graph* graph, GraphCollectiveOrder order_type,
     const absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>>&
diff --git a/tensorflow/core/graph/graph_constructor_test.cc b/tensorflow/core/graph/graph_constructor_test.cc
index 1912f2fc96a..13b8ecc5f1e 100644
--- a/tensorflow/core/graph/graph_constructor_test.cc
+++ b/tensorflow/core/graph/graph_constructor_test.cc
@@ -951,7 +951,7 @@ TEST_F(GraphConstructorTest, ImportGraphDef) {
   EXPECT_TRUE(HasControlEdge("D", sink));
   EXPECT_EQ(9, graph_.num_edges());
 
-  // Importing again should fail because of node name collissions.
+  // Importing again should fail because of node name collisions.
   s = ImportGraphDef(opts, def, &graph_, nullptr);
   EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
 
diff --git a/tensorflow/core/graph/mkl_layout_pass_test.cc b/tensorflow/core/graph/mkl_layout_pass_test.cc
index cc4e9c7ca0f..e2ab90de3fe 100644
--- a/tensorflow/core/graph/mkl_layout_pass_test.cc
+++ b/tensorflow/core/graph/mkl_layout_pass_test.cc
@@ -572,7 +572,7 @@ TEST_F(MklLayoutPassTest, Input_ControlEdge_PadWithConv2D_Positive) {
 // Test if output control edges does not duplicate after merge.
 // If both the merging ops have output control edge to a common op,
 // then after merge, the merged op will have only one control edge
-// to that commom op.
+// to that common op.
 // padding is VALID type
 // A = input(image), B = input(paddings), C= Pad = input of conv2D,
 // D=input(filter), E = Conv2D, Z = Zeta
@@ -1501,7 +1501,7 @@ TEST_F(MklLayoutPassTest, Input_ControlEdge_PadWithFusedConv2D_Positive) {
 // ts that there are no duplicate output control edges after merge.
 // If both the merging ops have output control edge to a common op,
 // then after merge, the merged op will have only one control edge
-// to that commom op. This test only add additional output control edge check
+// to that common op. This test only add additional output control edge check
 // based on the previous test NodeMerge_PadWithFusedConv2D_Positive1
 // padding is VALID type
 // A = input(image), B = input(paddings), C = Pad(A, B) = input of conv2D,
diff --git a/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc b/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc
index a9065acd6fa..427e6562d0d 100644
--- a/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc
+++ b/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc
@@ -411,7 +411,7 @@ struct CropAndResizeBackpropImage {
             d.stream(), config.virtual_thread_count, grads_image.data()));
     }
 
-    // Configurate interpolation method.
+    // Configure interpolation method.
     InterpolationMethod method = BILINEAR;
     if (method_name == "nearest") {
       method = NEAREST;
diff --git a/tensorflow/core/kernels/data/experimental/indexed_dataset_op.cc b/tensorflow/core/kernels/data/experimental/indexed_dataset_op.cc
index e75e6e4b80b..758eef0be5e 100644
--- a/tensorflow/core/kernels/data/experimental/indexed_dataset_op.cc
+++ b/tensorflow/core/kernels/data/experimental/indexed_dataset_op.cc
@@ -149,7 +149,7 @@ class MaterializedDatasetResource : public ResourceBase {
 
 // A wrapper class for storing an `IndexedDataset` instance in a DT_VARIANT
 // tensor. Objects of the wrapper class own a reference on an instance of an
-// `IndexedTensor` and the wrapper's copy constructor and desctructor take care
+// `IndexedTensor` and the wrapper's copy constructor and destructor take care
 // of managing the reference count.
 //
 // NOTE: This is not a feature-complete implementation of the DT_VARIANT
diff --git a/tensorflow/core/kernels/debug_ops_test.cc b/tensorflow/core/kernels/debug_ops_test.cc
index 273962be997..12ea7db1ea1 100644
--- a/tensorflow/core/kernels/debug_ops_test.cc
+++ b/tensorflow/core/kernels/debug_ops_test.cc
@@ -364,7 +364,7 @@ TEST_F(DebugNumericSummaryOpTest, Float_only_valid_values) {
        7.33333333333,  // variance of non-inf and non-nan elements.
        static_cast<double>(DT_FLOAT),  // dtype
        2.0,            // Number of dimensions.
-       2.0, 3.0});     // Dimensoin sizes.
+       2.0, 3.0});     // Dimension sizes.
 
   test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
 }
diff --git a/tensorflow/core/kernels/dynamic_stitch_op.cc b/tensorflow/core/kernels/dynamic_stitch_op.cc
index 5b8845b675d..0bcd9607f68 100644
--- a/tensorflow/core/kernels/dynamic_stitch_op.cc
+++ b/tensorflow/core/kernels/dynamic_stitch_op.cc
@@ -167,7 +167,7 @@ class DynamicStitchOpGPU : public DynamicStitchOpImplBase {
     // merged that aren't covered by an index in indices. What should we do?
     if (first_dim_size > 0) {
       // because the collision requirements, we have to deal with
-      // collion first before send data to gpu kernel.
+      // collision first before send data to gpu kernel.
       // TODO(ekelsen): Instead of doing a serial scan on the CPU to pick the
       // last of duplicated indices, it could instead be done of the GPU
       // implicitly using atomics to make sure the last index is the final
diff --git a/tensorflow/core/kernels/eigen_benchmark_cpu_test.cc b/tensorflow/core/kernels/eigen_benchmark_cpu_test.cc
index ec949ddc845..12fa7f3409d 100644
--- a/tensorflow/core/kernels/eigen_benchmark_cpu_test.cc
+++ b/tensorflow/core/kernels/eigen_benchmark_cpu_test.cc
@@ -8,7 +8,7 @@ You may obtain a copy of the License at
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONT OF ANY KIND, either express or implied.
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
diff --git a/tensorflow/core/kernels/fuzzing/string_split_fuzz.cc b/tensorflow/core/kernels/fuzzing/string_split_fuzz.cc
index 4dbb6a71160..b3b637bac72 100644
--- a/tensorflow/core/kernels/fuzzing/string_split_fuzz.cc
+++ b/tensorflow/core/kernels/fuzzing/string_split_fuzz.cc
@@ -34,7 +34,7 @@ class FuzzStringSplit : public FuzzSession {
     Tensor delimiter_tensor(tensorflow::DT_STRING, TensorShape({}));
 
     if (size > 0) {
-      // The spec for split is that the delimeter should be 0 or 1 characters.
+      // The spec for split is that the delimiter should be 0 or 1 characters.
       // Naturally, fuzz it with something larger. (This omits the possibility
       // of handing it a > int32_max size string, which should be tested for in
       // an explicit test).
diff --git a/tensorflow/core/kernels/hexagon/hexagon_control_wrapper.h b/tensorflow/core/kernels/hexagon/hexagon_control_wrapper.h
index 1b382996f88..9c57c1d4298 100644
--- a/tensorflow/core/kernels/hexagon/hexagon_control_wrapper.h
+++ b/tensorflow/core/kernels/hexagon/hexagon_control_wrapper.h
@@ -76,7 +76,7 @@ class HexagonControlWrapper final : public IRemoteFusedGraphExecutor {
   // TODO(satok): Use actual data passed by FillInputNode and remove
   // std::vector<float> dummy_input_float_{};
   std::unordered_map<int, std::vector<float>> input_tensor_data_{};
-  // Dummy byte array for cosnt node.
+  // Dummy byte array for const node.
   // TODO(satok): Remove
   std::unordered_map<int, std::vector<uint8>> dummy_const_data_{};
 
diff --git a/tensorflow/core/kernels/mkl_concat_op.cc b/tensorflow/core/kernels/mkl_concat_op.cc
index d8fbb83940a..2398269e0b1 100644
--- a/tensorflow/core/kernels/mkl_concat_op.cc
+++ b/tensorflow/core/kernels/mkl_concat_op.cc
@@ -484,7 +484,7 @@ class MklConcatOp : public OpKernel {
                  output_tensor->flat<uint8>().size() * sizeof(uint8));
   }
 
-  // This method finds the most commom format across all MKL inputs
+  // This method finds the most common format across all MKL inputs
   // Inputs:
   // 1. input_shapes: shapes of input (MKL) tensors.
   // 2. concat_dim: concat dimension.
diff --git a/tensorflow/core/kernels/mkl_conv_ops.cc b/tensorflow/core/kernels/mkl_conv_ops.cc
index da999d28b1f..0354f725f3c 100644
--- a/tensorflow/core/kernels/mkl_conv_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_ops.cc
@@ -96,7 +96,7 @@ struct MklConvFwdParams {
 typedef mkldnn::convolution_forward::primitive_desc ConvFwdPd;
 
 // With quantization, input, filter, and output can have different types
-// so we use differnt template parameter for each type
+// so we use different template parameter for each type
 template <typename Tinput, typename Tfilter, typename Tbias, typename Toutput>
 class MklConvFwdPrimitive : public MklPrimitive {
diff --git a/tensorflow/core/kernels/tensor_flag_utils.h b/tensorflow/core/kernels/tensor_flag_utils.h
index f406c73a297..ab59eecc256 100644
--- a/tensorflow/core/kernels/tensor_flag_utils.h
+++ b/tensorflow/core/kernels/tensor_flag_utils.h
@@ -36,7 +36,7 @@ std::vector ParseRowStartIndices(
 
 // Returns Status::OK() if and only if config is a float scalar or a matrix with
 // dimensions M x 3. If config is a scalar then config must be in the range
-// [0, 1.0). If confix is a matrix then config must have shape M x 3, all of
+// [0, 1.0). If config is a matrix then config must have shape M x 3, all of
 // its entries must be positive, and entries in the last column may not
 // exceed 1.0. If config is a matrix then it may not be empty.
 Status ValidateSparseMatrixShardingConfig(const Tensor& config);
diff --git a/tensorflow/core/nccl/nccl_manager.h b/tensorflow/core/nccl/nccl_manager.h
index 7cf2c85f3e8..f2f15f8ec64 100644
--- a/tensorflow/core/nccl/nccl_manager.h
+++ b/tensorflow/core/nccl/nccl_manager.h
@@ -189,7 +189,7 @@ class NcclManager {
   // the corresponding NCCL/CUDA error string.
   Status GetCommunicator(Collective* collective, Communicator** communicator);
 
-  // Adds a participant device to the local `Collective` instance correponding
+  // Adds a participant device to the local `Collective` instance corresponding
   // to `collective_key`. Launches the `Collective` if it is ready, which it
   // checks by calling `CheckReady()`. Also performs consistency and sanity
   // checks before launching.
diff --git a/tensorflow/core/platform/default/device_tracer.cc b/tensorflow/core/platform/default/device_tracer.cc
index ffcb38fdcd2..edb51f4d49b 100644
--- a/tensorflow/core/platform/default/device_tracer.cc
+++ b/tensorflow/core/platform/default/device_tracer.cc
@@ -560,7 +560,7 @@ void DeviceTracerImpl::AddCorrelationId(uint32 correlation_id,
       auto *params = reinterpret_cast<const cuLaunchKernel_params *>(
           cbInfo->functionParams);
       if (VLOG_IS_ON(2)) {
-        VLOG(2) << "LAUNCH stream " << params->hStream << " correllation "
+        VLOG(2) << "LAUNCH stream " << params->hStream << " correlation "
                 << cbInfo->correlationId << " kernel " << cbInfo->symbolName;
       }
       const string annotation =
diff --git a/tensorflow/core/platform/strong_hash.h b/tensorflow/core/platform/strong_hash.h
index 999fd2e4b30..a276780d13b 100644
--- a/tensorflow/core/platform/strong_hash.h
+++ b/tensorflow/core/platform/strong_hash.h
@@ -24,7 +24,7 @@ namespace tensorflow {
 // This is a strong keyed hash function interface for strings.
 // The hash function is deterministic on the content of the string within the
 // process. The key of the hash is an array of 2 uint64 elements.
-// A strong hash make it dificult, if not infeasible, to compute inputs that
+// A strong hash make it difficult, if not infeasible, to compute inputs that
 // hash to the same bucket.
 //
 // Usage:
diff --git a/tensorflow/core/profiler/internal/tfprof_op.cc b/tensorflow/core/profiler/internal/tfprof_op.cc
index 3dce1d85db3..6e9178c7164 100644
--- a/tensorflow/core/profiler/internal/tfprof_op.cc
+++ b/tensorflow/core/profiler/internal/tfprof_op.cc
@@ -182,7 +182,7 @@ const ShowMultiNode* TFOp::ShowInternal(const Options& opts,
     // TODO(xpan): Is it the right choice?
     root_->formatted_str = display_str;
   }
-  // Populate the chidren field.
+  // Populate the children field.
  auto* pre_pb = root_->mutable_proto();
  for (auto& show_node : show_nodes) {
    pre_pb->clear_children();
diff --git a/tensorflow/core/util/mkl_util.h b/tensorflow/core/util/mkl_util.h
index 91f9bc03625..fcd2e18944a 100644
--- a/tensorflow/core/util/mkl_util.h
+++ b/tensorflow/core/util/mkl_util.h
@@ -1581,7 +1581,7 @@ inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
 
 /// Function to calculate strides given tensor shape in Tensorflow order
 /// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
-/// dimesion with size 1 is outermost dimension; while dimension with size 4 is
+/// dimension with size 1 is outermost dimension; while dimension with size 4 is
 /// innermost dimension. So strides for this tensor would be {4 * 3 * 2,
 /// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
 ///
diff --git a/tensorflow/core/util/proto/descriptors.cc b/tensorflow/core/util/proto/descriptors.cc
index 271c85efd88..c3797f1a8a8 100644
--- a/tensorflow/core/util/proto/descriptors.cc
+++ b/tensorflow/core/util/proto/descriptors.cc
@@ -25,7 +25,7 @@ namespace {
 // Build a `DescriptorPool` from the named file or URI. The file or URI
 // must be available to the current TensorFlow environment.
 //
-// The file must contiain a serialized `FileDescriptorSet`. See
+// The file must contain a serialized `FileDescriptorSet`. See
 // `GetDescriptorPool()` for more information.
 Status GetDescriptorPoolFromFile(
     tensorflow::Env* env, const string& filename,