PR #26235: typos in tensorflow/core fixed
Imported from GitHub PR #26235

Copybara import of the project:

- d2d879abd683dab3dfdab2414109231b4d1c9452 Update tfprof_op.cc by Siju <siju.samuel@huawei.com>
- 58ed5984d2b6d94f6b5ddf898c06e6d90ba90dfe Update dynamic_stitch_op.cc by Siju <siju.samuel@huawei.com>
- 5287c0c63593cf4d4f6e361839b9c97b6ca657cc Update graph_constructor_test.cc by Siju <siju.samuel@huawei.com>
- 815e3faf355f6951d2a7ccd448b7b82b1b4fc201 Update mkl_concat_op.cc by Siju <siju.samuel@huawei.com>
- 07f6b58e642ff943d963b380094ac7e29a151435 Update mkl_layout_pass_test.cc by Siju <siju.samuel@huawei.com>
- b31b8b1c6cc6068718a2b53879fd926f2266801d Update eigen_benchmark_cpu_test.cc by Siju <siju.samuel@huawei.com>
- be9a9cef0fb49e6626ff9d4df090f29d4107cd9f Update node_def_builder_test.cc by Siju <siju.samuel@huawei.com>
- c8e931b0dbb42525249825735d5ff505c2bcfa47 Update crop_and_resize_op_gpu.cu.cc by Siju <siju.samuel@huawei.com>
- 93fbb1651ce4da38f9bc0bac1ecb1233cdd44db4 Update tensor_flag_utils.h by Siju <siju.samuel@huawei.com>
- 8e06ffe61af6a094176b77d1d60dccf7a8ba2d9d Update descriptors.cc by Siju <siju.samuel@huawei.com>
- e387eb2db20387597ba75c096d40a529598b0a9c Update dependency_optimizer.cc by Siju <siju.samuel@huawei.com>
- b44146b60e0176cbf21657ea3af9748b4e6843ef Update device_tracer.cc by Siju <siju.samuel@huawei.com>
- e6378de52869584bfaef4f0fef447a322fbad38f Update nccl_manager.h by Siju <siju.samuel@huawei.com>
- e84e0fa13a6982b9983eb8b07643d578d17d877a Update hexagon_control_wrapper.h by Siju <siju.samuel@huawei.com>
- c4c12ce562d50def5c0fd4657749aa516b024d04 Update string_split_fuzz.cc by Siju <siju.samuel@huawei.com>
- 9924f805777ab92d2eb23f8b5de9a422e11a681c Update collective_order.cc by Siju <siju.samuel@huawei.com>
- d3916ed593ae10da9f99e6f63f514ea4b046a8df Update indexed_dataset_op.cc by Siju <siju.samuel@huawei.com>
- c9da842bb575210469343cb6c036e8ebdb4b6715 Update mkl_conv_ops.cc by Siju <siju.samuel@huawei.com>
- 15fee4b6eb8ba2a7a023373c3bf4184f41fbe0ec Update strong_hash.h by Siju <siju.samuel@huawei.com>
- a5b911e6ec1f8481ad58b8394d7f578cec80e3a6 Update debug_ops_test.cc by Siju <siju.samuel@huawei.com>
- abcedcb7ae06c7c673f187f24011bba50b1714c5 Update mkl_util.h by Siju <siju.samuel@huawei.com>
- c3354ff5ae2cf6ca6225ca01b58c45d1eda423c9 Merge branch 'master' into patch-44 by Siju <sijusamuel@gmail.com>
- 9f35087704751435dff37a18326ece224e273c37 Merge c3354ff5ae2cf6ca6225ca01b58c45d1eda423c9 into e82c1... by Siju <sijusamuel@gmail.com>

COPYBARA_INTEGRATE_REVIEW=https://github.com/tensorflow/tensorflow/pull/26235 from siju-samuel:patch-44 c3354ff5ae2cf6ca6225ca01b58c45d1eda423c9
PiperOrigin-RevId: 237521323
parent d3b9ce5b4b
commit 249644e6e0
@@ -235,7 +235,7 @@ TEST_F(NodeDefBuilderTest, Polymorphic) {
 op: "Polymorphic" input: "a"
 attr { key: "T" value { type: DT_BOOL } } )proto");

-// Conficting Attr()
+// Conflicting Attr()
 ExpectFailure(Builder().Input(FakeInput(DT_BOOL)).Attr("T", DT_STRING),
 "Inconsistent values for attr 'T' DT_BOOL vs. DT_STRING while");
@@ -143,7 +143,7 @@ Status CreateControlDependencies(

 // Insert control dependencies defined by `dependency_edges` in `graph`. If
 // `order_type` is `kEdges`, insert explicit control edges, else if `order_type`
-// is `kAttrs`, encode depdencies as an attribute on collective node.
+// is `kAttrs`, encode dependencies as an attribute on collective node.
 Status InsertControlDependencies(
 Graph* graph, GraphCollectiveOrder order_type,
 const absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>>&
@@ -951,7 +951,7 @@ TEST_F(GraphConstructorTest, ImportGraphDef) {
 EXPECT_TRUE(HasControlEdge("D", sink));
 EXPECT_EQ(9, graph_.num_edges());

-// Importing again should fail because of node name collissions.
+// Importing again should fail because of node name collisions.
 s = ImportGraphDef(opts, def, &graph_, nullptr);
 EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
@@ -572,7 +572,7 @@ TEST_F(MklLayoutPassTest, Input_ControlEdge_PadWithConv2D_Positive) {
 // Test if output control edges does not duplicate after merge.
 // If both the merging ops have output control edge to a common op,
 // then after merge, the merged op will have only one control edge
-// to that commom op.
+// to that common op.
 // padding is VALID type
 // A = input(image), B = input(paddings), C= Pad = input of conv2D,
 // D=input(filter), E = Conv2D, Z = Zeta
@@ -1501,7 +1501,7 @@ TEST_F(MklLayoutPassTest, Input_ControlEdge_PadWithFusedConv2D_Positive) {
 // ts that there are no duplicate output control edges after merge.
 // If both the merging ops have output control edge to a common op,
 // then after merge, the merged op will have only one control edge
-// to that commom op. This test only add additional output control edge check
+// to that common op. This test only add additional output control edge check
 // based on the previous test NodeMerge_PadWithFusedConv2D_Positive1
 // padding is VALID type
 // A = input(image), B = input(paddings), C = Pad(A, B) = input of conv2D,
@@ -411,7 +411,7 @@ struct CropAndResizeBackpropImage<GPUDevice, T> {
 d.stream(), config.virtual_thread_count, grads_image.data()));
 }

-// Configurate interpolation method.
+// Configure interpolation method.
 InterpolationMethod method = BILINEAR;
 if (method_name == "nearest") {
 method = NEAREST;
@@ -149,7 +149,7 @@ class MaterializedDatasetResource : public ResourceBase {

 // A wrapper class for storing an `IndexedDataset` instance in a DT_VARIANT
 // tensor. Objects of the wrapper class own a reference on an instance of an
-// `IndexedTensor` and the wrapper's copy constructor and desctructor take care
+// `IndexedTensor` and the wrapper's copy constructor and destructor take care
 // of managing the reference count.
 //
 // NOTE: This is not a feature-complete implementation of the DT_VARIANT
@@ -364,7 +364,7 @@ TEST_F(DebugNumericSummaryOpTest, Float_only_valid_values) {
 7.33333333333, // variance of non-inf and non-nan elements.
 static_cast<double>(DT_FLOAT), // dtype
 2.0, // Number of dimensions.
-2.0, 3.0}); // Dimensoin sizes.
+2.0, 3.0}); // Dimension sizes.

 test::ExpectTensorNear<double>(expected, *GetOutput(0), 1e-8);
 }
@@ -167,7 +167,7 @@ class DynamicStitchOpGPU : public DynamicStitchOpImplBase<T> {
 // merged that aren't covered by an index in indices. What should we do?
 if (first_dim_size > 0) {
 // because the collision requirements, we have to deal with
-// collion first before send data to gpu kernel.
+// collision first before send data to gpu kernel.
 // TODO(ekelsen): Instead of doing a serial scan on the CPU to pick the
 // last of duplicated indices, it could instead be done of the GPU
 // implicitly using atomics to make sure the last index is the final
@@ -8,7 +8,7 @@ You may obtain a copy of the License at

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONT OF ANY KIND, either express or implied.
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
@@ -34,7 +34,7 @@ class FuzzStringSplit : public FuzzSession {
 Tensor delimiter_tensor(tensorflow::DT_STRING, TensorShape({}));

 if (size > 0) {
-// The spec for split is that the delimeter should be 0 or 1 characters.
+// The spec for split is that the delimiter should be 0 or 1 characters.
 // Naturally, fuzz it with something larger. (This omits the possibility
 // of handing it a > int32_max size string, which should be tested for in
 // an explicit test).
@@ -76,7 +76,7 @@ class HexagonControlWrapper final : public IRemoteFusedGraphExecutor {
 // TODO(satok): Use actual data passed by FillInputNode and remove
 // std::vector<float> dummy_input_float_{};
 std::unordered_map<int, std::vector<uint8>> input_tensor_data_{};
-// Dummy byte array for cosnt node.
+// Dummy byte array for const node.
 // TODO(satok): Remove
 std::unordered_map<int, std::vector<uint8>> dummy_const_data_{};
@@ -484,7 +484,7 @@ class MklConcatOp : public OpKernel {
 output_tensor->flat<uint8>().size() * sizeof(uint8));
 }

-// This method finds the most commom format across all MKL inputs
+// This method finds the most common format across all MKL inputs
 // Inputs:
 // 1. input_shapes: shapes of input (MKL) tensors.
 // 2. concat_dim: concat dimension.
@@ -96,7 +96,7 @@ struct MklConvFwdParams {
 typedef mkldnn::convolution_forward::primitive_desc ConvFwdPd;

 // With quantization, input, filter, and output can have different types
-// so we use differnt template parameter for each type
+// so we use different template parameter for each type
 template <typename T, typename Tinput, typename Tfilter, typename Tbias,
 typename Toutput>
 class MklConvFwdPrimitive : public MklPrimitive {
@@ -36,7 +36,7 @@ std::vector<Tindices> ParseRowStartIndices(

 // Returns Status::OK() if and only if config is a float scalar or a matrix with
 // dimensions M x 3. If config is a scalar then config must be in the range
-// [0, 1.0). If confix is a matrix then config must have shape M x 3, all of
+// [0, 1.0). If config is a matrix then config must have shape M x 3, all of
 // its entries must be positive, and entries in the last column may not
 // exceed 1.0. If config is a matrix then it may not be empty.
 Status ValidateSparseMatrixShardingConfig(const Tensor& config);
@@ -189,7 +189,7 @@ class NcclManager {
 // the corresponding NCCL/CUDA error string.
 Status GetCommunicator(Collective* collective, Communicator** communicator);

-// Adds a participant device to the local `Collective` instance correponding
+// Adds a participant device to the local `Collective` instance corresponding
 // to `collective_key`. Launches the `Collective` if it is ready, which it
 // checks by calling `CheckReady()`. Also performs consistency and sanity
 // checks before launching.
@@ -560,7 +560,7 @@ void DeviceTracerImpl::AddCorrelationId(uint32 correlation_id,
 auto *params = reinterpret_cast<const cuLaunchKernel_params *>(
 cbInfo->functionParams);
 if (VLOG_IS_ON(2)) {
-VLOG(2) << "LAUNCH stream " << params->hStream << " correllation "
+VLOG(2) << "LAUNCH stream " << params->hStream << " correlation "
 << cbInfo->correlationId << " kernel " << cbInfo->symbolName;
 }
 const string annotation =
@@ -24,7 +24,7 @@ namespace tensorflow {
 // This is a strong keyed hash function interface for strings.
 // The hash function is deterministic on the content of the string within the
 // process. The key of the hash is an array of 2 uint64 elements.
-// A strong hash make it dificult, if not infeasible, to compute inputs that
+// A strong hash make it difficult, if not infeasible, to compute inputs that
 // hash to the same bucket.
 //
 // Usage:
@@ -182,7 +182,7 @@ const ShowMultiNode* TFOp::ShowInternal(const Options& opts,
 // TODO(xpan): Is it the right choice?
 root_->formatted_str = display_str;
 }
-// Populate the chidren field.
+// Populate the children field.
 auto* pre_pb = root_->mutable_proto();
 for (auto& show_node : show_nodes) {
 pre_pb->clear_children();
@@ -1581,7 +1581,7 @@ inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {

 /// Function to calculate strides given tensor shape in Tensorflow order
 /// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
-/// dimesion with size 1 is outermost dimension; while dimension with size 4 is
+/// dimension with size 1 is outermost dimension; while dimension with size 4 is
 /// innermost dimension. So strides for this tensor would be {4 * 3 * 2,
 /// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
 ///
@@ -25,7 +25,7 @@ namespace {
 // Build a `DescriptorPool` from the named file or URI. The file or URI
 // must be available to the current TensorFlow environment.
 //
-// The file must contiain a serialized `FileDescriptorSet`. See
+// The file must contain a serialized `FileDescriptorSet`. See
 // `GetDescriptorPool()` for more information.
 Status GetDescriptorPoolFromFile(
 tensorflow::Env* env, const string& filename,