Merge pull request #44408 from Molkree:typos_in_core_dir

PiperOrigin-RevId: 341123345
Change-Id: Ibc4b77c23e7e2ff057a1b8ba34f6b639526f2140
TensorFlower Gardener 2020-11-06 15:01:58 -08:00
commit 8687106526
31 changed files with 66 additions and 72 deletions


@@ -54,7 +54,7 @@ END
 name: "noise"
 description: <<END
 indicates if the noise should `uniform`, `gaussian`, or
-`zero`. The default is `uniform` which means the the noise type
+`zero`. The default is `uniform` which means the noise type
 will be decided by `uniform_noise`.
 END
 }


@@ -55,7 +55,7 @@ END
 name: "noise"
 description: <<END
 indicates if the noise should `uniform`, `gaussian`, or
-`zero`. The default is `uniform` which means the the noise type
+`zero`. The default is `uniform` which means the noise type
 will be decided by `uniform_noise`.
 END
 }


@@ -15,7 +15,7 @@ END
 in_arg {
 name: "shape"
 description: <<END
-The desired shape of the the output tensor. If left unspecified (empty),
+The desired shape of the output tensor. If left unspecified (empty),
 the minimal shape required to contain all the elements in the ragged tensor
 (the natural shape) will be used. If some dimensions are left unspecified, then
 the size of the natural shape is used in that dimension.


@@ -54,7 +54,7 @@ END
 name: "identical_element_shapes"
 description: <<END
 If true (default is false), then all
-elements in the TensorArray will be expected to have have identical shapes.
+elements in the TensorArray will be expected to have identical shapes.
 This allows certain behaviors, like dynamically checking for
 consistent shapes on write, and being able to fill in properly
 shaped zero tensors on stack -- even if the element_shape attribute


@@ -46,8 +46,8 @@ class BufRendezvous {
 ~BufRendezvous();
-// Inform all all waiting parties that this BufRendezvous is defunct
-// because of an error Status interrupting the Step.
+// Inform all waiting parties that this BufRendezvous is defunct because of
+// an error Status interrupting the Step.
 void StartAbort(const Status& s);
 struct Hook;
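
For context, the corrected comment describes an abort protocol whose shape is roughly the sketch below; the hook table, callback names, and ownership handling are assumptions for illustration, not details taken from this diff:

// Sketch only: record the error and deliver it to every waiting party so
// that no producer or consumer blocks forever on a defunct rendezvous.
// hook_table_, prod_cb, and cons_cb are assumed names; hook cleanup is
// elided.
void BufRendezvous::StartAbort(const Status& s) {
  mutex_lock l(mu_);
  if (status_.ok()) status_ = s;  // keep the first error seen
  for (auto& entry : hook_table_) {
    Hook* hook = entry.second;
    if (hook->prod_cb) hook->prod_cb(status_);           // wake the producer
    if (hook->cons_cb) hook->cons_cb(status_, nullptr);  // wake the consumer
  }
  hook_table_.clear();
}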


@@ -739,7 +739,7 @@ Status EagerContext::AddFunctionDef(const FunctionDef& fdef,
 return errors::InvalidArgument(
 "Attempting to add a duplicate function with name: ",
 fdef.signature().name(), " where the previous and current ",
-"definitions differ. Previous definiton: ",
+"definitions differ. Previous definition: ",
 prev_fdef->DebugString(),
 " and current definition: ", fdef.DebugString());
 }
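
For reference, the message above belongs to a duplicate-registration check; a hedged reconstruction of its shape (the lookup and the FunctionDefsEqual helper are assumptions, not shown in this diff):

// Sketch: re-adding a function is only allowed when the new FunctionDef is
// identical to the one already registered under that name.
const FunctionDef* prev_fdef = func_lib_def.Find(fdef.signature().name());
if (prev_fdef != nullptr && !FunctionDefsEqual(*prev_fdef, fdef)) {
  return errors::InvalidArgument(
      "Attempting to add a duplicate function with name: ",
      fdef.signature().name(), " where the previous and current ",
      "definitions differ. Previous definition: ", prev_fdef->DebugString(),
      " and current definition: ", fdef.DebugString());
}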
@@ -1233,9 +1233,8 @@ Status EagerContext::UpdateRemoteMaster(
 tf_shared_lock l(remote_state_mu_);
 if (context_id != context_id_) {
 return errors::InvalidArgument(
-"Failed to update remote remote master context due to invalid ",
-"context id. Request id = ", context_id,
-" but current id = ", context_id_);
+"Failed to update remote master context due to invalid context id. ",
+"Request id = ", context_id, " but current id = ", context_id_);
 }
 }


@@ -212,7 +212,7 @@ bool MklEagerOpRewrite::FastCheckIfKernelRegistered(std::string op_name,
 registered_kernels_map_.insert(
 std::make_pair(registered_kernels_key, kernel_registered));
 } else {
-// Kernel is visited atleast once. return stored registration result.
+// Kernel is visited at least once. Return stored registration result.
 kernel_registered = kernel_element->second;
 }


@@ -1897,8 +1897,8 @@ uint64 GPUKernelTracker::MaybeQueue(OpKernelContext* ctx) {
 mem_since_last_ += mem_used;
 int weight = 1;
 // Note that if all {max_bytes, max_interval, max_pending} are zero then
-// we we track every single kernel with no pending cap. This can happen
-// if timestamped_allocator alone was specified.
+// we track every single kernel with no pending cap. This can happen if
+// timestamped_allocator alone was specified.
 if ((mem_since_last_ < params_.max_bytes) &&
 (ops_since_last_ < params_.max_interval)) {
 return 0;


@@ -785,10 +785,9 @@ Status InlineFunctionBody(const FunctionLibraryDefinition& flib_def, Graph* g,
 // always have input_control_node when we need it.
 if (output_control_node && output_control_node->in_edges().empty()) {
 if (input_control_node) {
-VLOG(4)
-<< "Add add a control edge between input and output control nodes: "
-<< input_control_node->name() << " to "
-<< output_control_node->name();
+VLOG(4) << "Add a control edge between input and output control nodes: "
+<< input_control_node->name() << " to "
+<< output_control_node->name();
 g->AddControlEdge(input_control_node, output_control_node,
 kDoNotCheckDuplicates);
 } else {


@@ -1571,7 +1571,7 @@ class MklLayoutRewritePass : public GraphOptimizationPass {
 }
 // If the depth_radius of LRN is not 2, then MKL DNN takes unoptimized
-// path. The unoptimized path is slow. Thus we dont rewrite the node
+// path. The unoptimized path is slow. Thus we don't rewrite the node
 // and use default Eigen. But for depth_radius=2, MKL DNN optimized
 // path is taken, i.e., eigen node is rewritten by MKl DNN node.
 static bool LrnRewrite(const Node* n) {
@@ -1969,7 +1969,7 @@ class MklLayoutRewritePass : public GraphOptimizationPass {
 // Helper function used by FixMklMetaDataEdges. Fixes the metadata edge
 // pointed by 'e_metadata' corresponding to the data edge 'e_data' in graph
-// 'g'. Returns true is fixup was done; otherwise, it returns false.
+// 'g'. Returns true if fixup was done; otherwise, it returns false.
 bool FixMklMetaDataEdgeIfNeeded(std::unique_ptr<Graph>* g, const Edge* e_data,
 const Edge* e_metadata);
@@ -3460,7 +3460,7 @@ Status MklLayoutRewritePass::MergeConv2DBackpropFilterWithBiasAddGrad(
 // This is because BackpropFilterWithBias is going to emit bias output also.
 NodeBuilder nb(fltr->name(), csinfo_.conv2d_grad_filter_with_bias);
 // Since Conv2DBackpropFilterWithBias has same number of inputs as
-// Conv2DBackpropFilter, we can just copy input edges directly. We dont need
+// Conv2DBackpropFilter, we can just copy input edges directly. We don't need
 // to copy any data input of BiasAddGrad because that input also goes to
 // Conv2DBackpropFilter.
 const int fltr_ins = fltr->num_inputs();
@@ -3795,7 +3795,7 @@ Status MklLayoutRewritePass::RewriteNode(std::unique_ptr<Graph>* g,
 return ret_status;
 }
-// TODO(mdfaijul): Is there any other elegent way to check for quantized ops
+// TODO(mdfaijul): Is there any other elegant way to check for quantized ops
 // having attributes other than "T"?
 // Current implementation reflects only QuantizedConv2D and its fused Ops.
 const MklLayoutRewritePass::RewriteInfo*
@@ -3983,7 +3983,7 @@ MklLayoutRewritePass::CheckForNodeFusion(Node* a) const {
 for (auto fi = finfo_.begin(); fi != finfo_.end(); ++fi) {
 //
-// Make sure node "a" and its succeding nodes (b, c ...), match the pattern
+// Make sure node "a" and its succeeding nodes (b, c ...), match the pattern
 // defined in fusion info (ops[0], ops[1], ...),
 // a.k.a. "a->b->c" matches "op1->op2->op3"
 //


@@ -75,8 +75,8 @@ class ProcessFunctionLibraryRuntime {
 ~ProcessFunctionLibraryRuntime() {
 // Deleting the FunctionLibraryRuntime map will delete the function handles
 // registered in it, which may call ReleaseHandle in this class again to
-// release their sub-function. These circular calls may casue segfault
-// since the flr_map_ may has already been deleted. Explicitly releasing
+// release their sub-function. These circular calls may cause segfault
+// since the flr_map_ may have already been deleted. Explicitly releasing
 // flr_map_ here and checking flr_map_ in ReleaseHandle to avoid this.
 flr_map_.reset();
 }
@@ -171,7 +171,7 @@ class ProcessFunctionLibraryRuntime {
 bool* is_cross_process) const;
 // Delegates to the local FLR that owns state corresponding to `handle` and
-// tells it to release it. If the `handle` isnt' needed at all, the local FLR
+// tells it to release it. If the `handle` isn't needed at all, the local FLR
 // might call RemoveHandle on this to get rid of the state owned by the Proc
 // FLR.
 // For multi-device functions, calls ReleaseHandle on local FLRs for each


@@ -100,11 +100,10 @@ ScopedAllocatorContainer::~ScopedAllocatorContainer() {
 VLOG(2) << "~ScopedAllocatorContainer " << this << " step " << step_id_
 << " on " << mgr_->device_name();
 mutex_lock l(mu_);
-// In normal execution the table should be empty and all of its
-// contents deleted via Drop. When when a step ends early
-// (e.g. through abnormal termination) we need to clean up
-// explicitly. So long as graph execution of the associated step has
-// completely terminated this should be safe.
+// In normal execution the table should be empty and all of its contents
+// deleted via Drop. When a step ends early (e.g. through abnormal
+// termination) we need to clean up explicitly. So long as graph execution
+// of the associated step has completely terminated this should be safe.
 for (auto& it : allocators_) {
 if (it.second.field_index == ScopedAllocator::kBackingIndex) {
 delete it.second.scoped_allocator;


@@ -263,7 +263,7 @@ class MasterSession::ReffedClientGraph : public core::RefCounted {
 // The interface to the worker. Owned.
 WorkerInterface* worker = nullptr;
-// After registeration with the worker, graph_handle identifies
+// After registration with the worker, graph_handle identifies
 // this partition on the worker.
 string graph_handle;


@@ -704,8 +704,8 @@ class Model {
 // (e.g. CPU, memory). The logic for collecting this information assumes that
 // the collection is not repeatedly disabled and enabled. As a consequence,
 // the implementation starts collecting resource usage when it encounters a
-// tunable parameter (because the information is used for for tuning the value
-// of the parameter) and never stops.
+// tunable parameter (because the information is used for tuning the value of
+// the parameter) and never stops.
 std::atomic<bool> collect_resource_usage_;
 };


@@ -289,7 +289,7 @@ TEST_F(LocalRendezvousTest, RandomSendRecv) {
 // configured with only 16 threads. Furthermore, because the
 // threadpool may execute the closures in an arbitrary order, we
 // must use RecvAsync below. Otherwise, blocking Recv() may run
-// before all all the Send() and deadlock.
+// before all the Send() and deadlock.
 static const int N = 100;
 random::PhiloxRandom philox(testing::RandomSeed(), 17);
 random::SimplePhilox rnd(&philox);


@@ -105,7 +105,7 @@ void ExpectClose(const Tensor& x, const Tensor& y, double atol = -1.0,
 double rtol = -1.0);
 // Expects "x" and "y" are tensors of the same type T, same shape, and
-// equal equal values. Consider using ExpectEqual above instead.
+// equal values. Consider using ExpectEqual above instead.
 template <typename T>
 void ExpectTensorEqual(const Tensor& x, const Tensor& y) {
 EXPECT_EQ(x.dtype(), DataTypeToEnum<T>::value);
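
As a usage note for the helper declared above, a call site looks roughly like this sketch (the test:: namespace qualification and the setValues setup are assumptions, not shown in this diff):

// Both tensors must have the same dtype, shape, and element values.
Tensor x(DT_INT32, TensorShape({3}));
Tensor y(DT_INT32, TensorShape({3}));
x.flat<int32>().setValues({1, 2, 3});
y.flat<int32>().setValues({1, 2, 3});
test::ExpectTensorEqual<int32>(x, y);  // passes; any mismatch would fail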


@@ -1016,7 +1016,7 @@ TEST_F(GraphPropertiesTest, IdentityPassingShape) {
 TEST_F(GraphPropertiesTest, SkippingValueInferenceForLargeTensors) {
 // When using aggressive_shape_inference, we run EvaluateNode() for
 // allowlisted ops and small input / output tensors. For instance, Fill op is
-// evaluated and produces output tensor value if output tensor size is smal
+// evaluated and produces output tensor value if output tensor size is small
 // (currently, fewer than 17 elements); otherwise we don't run EvaluateNode().
 // This is to avoid wasting time and memory for producing huge tensors (e.g.,
 // initializing a large table using Fill.
@@ -1132,7 +1132,7 @@ TEST_F(GraphPropertiesTest, PackWithIdentityInput) {
 tensorflow::Scope s = tensorflow::Scope::NewRootScope();
 // Same to PackWithConstInput test case, but a, b, c, and d are Identity ops
 // from Const.
-// If output_tensors_as_shape is not not set for those Shape ops or Pack op
+// If output_tensors_as_shape is not set for those Shape ops or Pack op
 // doesn't take input_tensors_as_shape, Fill op's input doesn't have value;
 // hence, its output shape becomes unknown.
 Output a0 = ops::Const(s.WithOpName("a0"), 1, {});
@@ -1197,7 +1197,7 @@ TEST_F(GraphPropertiesTest, FunctionWithDtResourceInput) {
 break;
 }
 }
-// We cannot infer the function output shape correclty without those attr,
+// We cannot infer the function output shape correctly without those attr,
 // but still it shouldn't fail; also, there can be some shapes we can
 // infer in such a case. In this test graph,
 // z2 of the function node just returns x input; hence, even if _Arg's shape
@@ -2377,7 +2377,7 @@ TEST_F(GraphPropertiesTest,
 TF_ASSERT_OK(properties.InferStatically(true));
 const auto& y1_output_properties = properties.GetOutputProperties("y1");
 // y1=reshape(x1), but x1's shape in unknown, so y1 should be [-1, 10].
-// The first dimensino should not be 10.
+// The first dimension should not be 10.
 EXPECT_EQ(y1_output_properties.size(), 1);
 EXPECT_EQ(y1_output_properties[0].shape().dim_size(), 2);
 EXPECT_LT(y1_output_properties[0].shape().dim(0).size(), 0);


@@ -191,7 +191,7 @@ EIGEN_ALWAYS_INLINE PerCacheLineParameters<T1> CalculatePerCacheLineParameters(
 } else if (in_y >= resized_height) {
 in_y = (resized_height * 2.0f) - (in_y + 1.0f + pad_offset);
 }
-// Here's where do do the actual resize.
+// Here's where to do the actual resize.
 in_y *= st.height_scale;
 const int64 top_y_index = static_cast<int64>(std::floor(in_y));
 const int64 bottom_y_index =


@@ -66,7 +66,7 @@ static void SpatialMaxPoolWithArgMaxHelper(
 context, include_batch_in_index,
 errors::Internal(
 "SpatialMaxPoolWithArgMaxHelper requires include_batch_in_index "
-"to be True when when input_backprop != nullptr"));
+"to be True when input_backprop != nullptr"));
 }
 typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>


@@ -213,8 +213,7 @@ __global__ void MaxPoolBackward(const int nthreads,
 }
 }
-// The parameters to the kernels in the gradient gradient function is as
-// follows:
+// The parameters to the kernels in the gradient function is as follows:
 // nthreads: the number of threads, which is equal to the output size. The
 // gradient of the MaxPooling gradient w.r.t. the output data has a
 // dimensions of N*C*Hout*Wout
@@ -310,8 +309,7 @@ __global__ void MaxPoolGradBackwardNoMaskNHWC(
 }
 }
-// The parameters to the kernels in the gradient gradient function is as
-// follows:
+// The parameters to the kernels in the gradient function is as follows:
 // nthreads: the number of threads, which is equal to the output size. The
 // gradient of the MaxPooling gradient w.r.t. the output data has a
 // dimensions of N*C*Hout*Wout


@@ -341,7 +341,7 @@ TEST_P(ParameterizedQuantizeAndDequantizeTest,
 // Then it is dequantized to:
 // (slice_idx + 1) * {-1, -63.0/127, 0, 38.0/127, 102.0/127, 70/127, 64/127}
-// With int8, each slice of the the tensor is quantized to
+// With int8, each slice of the tensor is quantized to
 // {-127, -64, 0, 38, 102, 70, 64}.
 // Scale is: (slice_idx + 1) / 127
 // Then it is dequantized to:


@@ -151,10 +151,9 @@ class WhereCPUOp : public OpKernel {
 Tensor* output = nullptr;
 OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
-// TODO(ebrevdo): Replace single-threaded copy with a
-// multithreaded block copy by getting block counts above instead
-// of a global NumTrue, then having each block filled in in
-// separate threads below.
+// TODO(ebrevdo): Replace single-threaded copy with a multithreaded block
+// copy by getting block counts above instead of a global NumTrue, then
+// having each block filled in separate threads below.
 int64 found_true = 0;
 #define HANDLE_DIM(NDIM) \


@@ -172,8 +172,8 @@ class SqliteStatement {
 /// The OrDie version returns `!is_done` which, if true, indicates a
 /// row is available.
 ///
-/// This statement should be Reset() or destructed when when finished
-/// with the result.
+/// This statement should be Reset() or destructed when finished with
+/// the result.
 Status Step(bool* is_done);
 bool StepOrDie() TF_MUST_USE_RESULT;
@@ -182,8 +182,8 @@ class SqliteStatement {
 /// If a row isn't returned, an internal error Status is returned
 /// that won't be reflected in the connection error state.
 ///
-/// This statement should be Reset() or destructed when when finished
-/// with the result.
+/// This statement should be Reset() or destructed when finished with
+/// the result.
 Status StepOnce();
 const SqliteStatement& StepOnceOrDie();
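
Taken together, the Step()/Reset() contract documented above implies a read loop like the following sketch; the ColumnInt accessor is an assumption for illustration, not shown in this diff:

// Sketch: Step() until is_done, then Reset() (or destroy the statement)
// once finished with the result.
Status SumFirstColumn(SqliteStatement& stmt, int64* sum) {
  *sum = 0;
  bool is_done = false;
  while (true) {
    TF_RETURN_IF_ERROR(stmt.Step(&is_done));
    if (is_done) break;         // result set exhausted; no row available
    *sum += stmt.ColumnInt(0);  // assumed column accessor
  }
  stmt.Reset();  // required before reusing the statement
  return Status::OK();
}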


@@ -113,10 +113,10 @@ class SnappyInputBuffer : public InputStreamInterface {
 // Next unread byte in `output_buffer_`
 char* next_out_;
-// Number of unread bytes bytes available at `next_in_` in `input_buffer_`.
+// Number of unread bytes available at `next_in_` in `input_buffer_`.
 size_t avail_in_ = 0;
-// Number of unread bytes bytes available at `next_out_` in `output_buffer_`.
+// Number of unread bytes available at `next_out_` in `output_buffer_`.
 size_t avail_out_ = 0;
 // Number of *uncompressed* bytes that have been read from this stream.


@@ -42,8 +42,8 @@ ZlibOutputBuffer::~ZlibOutputBuffer() {
 }
 Status ZlibOutputBuffer::Init() {
-// Output buffer size should be greater than 1 because deflation needs atleast
-// one byte for book keeping etc.
+// Output buffer size should be greater than 1 because deflation needs at
+// least one byte for book keeping etc.
 if (output_buffer_capacity_ <= 1) {
 return errors::InvalidArgument(
 "output_buffer_bytes should be greater than "


@@ -46,7 +46,7 @@ enum JPEGErrors {
 };
 // Prevent bad compiler behavior in ASAN mode by wrapping most of the
-// arguments in a struct struct.
+// arguments in a struct.
 class FewerArgsForCompiler {
 public:
 FewerArgsForCompiler(int datasize, const UncompressFlags& flags, int64* nwarn,
@@ -146,8 +146,8 @@ uint8* UncompressLow(const void* srcdata, FewerArgsForCompiler* argball) {
 case 3:
 if (cinfo.jpeg_color_space == JCS_CMYK ||
 cinfo.jpeg_color_space == JCS_YCCK) {
-// Always use cmyk for output in a 4 channel jpeg. libjpeg has a builtin
-// decoder. We will further convert to rgb below.
+// Always use cmyk for output in a 4 channel jpeg. libjpeg has a
+// built-in decoder. We will further convert to rgb below.
 cinfo.out_color_space = JCS_CMYK;
 } else {
 cinfo.out_color_space = JCS_RGB;
@@ -623,7 +623,7 @@ bool CompressInternal(const uint8* srcdata, int width, int height,
 JOCTET* buffer = nullptr;
-// NOTE: for broader use xmp_metadata should be made a unicode string
+// NOTE: for broader use xmp_metadata should be made a Unicode string
 CHECK(srcdata != nullptr);
 CHECK(output != nullptr);
 // This struct contains the JPEG compression parameters and pointers to


@@ -639,7 +639,7 @@ TEST(NcclManagerTest, CommunicatorKey) {
 }
 #if !TENSORFLOW_USE_ROCM
-// ROCm platform currently does not support simulating a mutli-node
+// ROCm platform currently does not support simulating a multi-node
 // environment, on a single node with multiple GPUS. So tests that rely
 // upon such simulation need to be skipped on the ROCm platform
@@ -664,7 +664,7 @@ TYPED_TEST(NcclManagerTest, MultiNodeSingle) {
 }
 #if !TENSORFLOW_USE_ROCM
-// ROCm platform currently does not support simulating a mutli-node
+// ROCm platform currently does not support simulating a multi-node
 // environment, on a single node with multiple GPUS. So tests that rely
 // upon such simulation need to be skipped on the ROCm platform
@@ -858,7 +858,7 @@ TYPED_TEST(NcclManagerTest, BroadcastInconsistentSource) {
 }
 #if !TENSORFLOW_USE_ROCM
-// ROCm platform currently does not support simulating a mutli-node
+// ROCm platform currently does not support simulating a multi-node
 // environment, on a single node with multiple GPUS. So tests that rely
 // upon such simulation need to be skipped on the ROCm platform
@@ -867,7 +867,7 @@ TYPED_TEST(NcclManagerTest, AbortThenReset) {
 using TestCase = typename TestFixture::TestCase;
 const int num_nodes = 2;
 std::vector<NodeState> nodes(num_nodes);
-// First do a normal all-reduce to simulate the the case when there're
+// First do a normal all-reduce to simulate the case when there're
 // multiple communicators.
 this->RunMultiNodeAllReduceTest(nodes, /* num_ranks_per_node */ 1);


@@ -504,7 +504,7 @@ TEST(GcsFileSystemTest, NewRandomAccessFile_WithLocationConstraintCaching) {
 string bucket = "gs://bucket/random_access.txt";
 string another_bucket = "gs://anotherbucket/random_access.txt";
-// Multiple calls should only cause one request to the location api.
+// Multiple calls should only cause one request to the location API.
 TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file));
 TF_EXPECT_OK(fs.NewRandomAccessFile(bucket, nullptr, &file));
@@ -780,7 +780,7 @@ TEST(GcsFileSystemTest, NewRandomAccessFile_WithBlockCache_MaxStaleness) {
 // this loop 10 times. This shows that the underlying FileBlockCache persists
 // across file close/open boundaries.
 for (int i = 0; i < 10; i++) {
-// Create two files. Since these files have the same name name and the max
+// Create two files. Since these files have the same name and the max
 // staleness of the filesystem is > 0, they will share the same blocks.
 std::unique_ptr<RandomAccessFile> file1;
 std::unique_ptr<RandomAccessFile> file2;


@@ -44,7 +44,7 @@ int NumSchedulableCPUs();
 // This value is either the number of schedulable CPUs, or a value specific to
 // the underlying cluster management. Applications should assume this value can
 // change throughout the lifetime of the process. This function must not be
-// called during initialization, i.e., before before main() has started.
+// called during initialization, i.e., before main() has started.
 int MaxParallelism();
 // Returns an estimate for the maximum parallelism for this process on the
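
The "not before main()" constraint documented above rules out static initializers; a minimal illustration, assuming the tensorflow::port namespace:

// Unsafe: would run during static initialization, before main() starts.
// static const int kPoolSize = tensorflow::port::MaxParallelism();

int main(int argc, char** argv) {
  // Safe: query after main() has begun, and re-query over time since the
  // value can change throughout the lifetime of the process.
  const int pool_size = tensorflow::port::MaxParallelism();
  (void)pool_size;
  return 0;
}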


@@ -41,7 +41,7 @@ typedef std::vector<std::string> string_vec;
 // magic prefix, and return true; or return false on error.
 // Print the platform strings embedded in the binary file_name and return 0,
-// on on error return 2.
+// or on error return 2.
 static int PrintStrings(const std::string file_name) {
 int rc = 0;
 string_vec str;


@@ -1414,7 +1414,7 @@ class MklDnnData {
 /// for reorder. Otherwise, it will return memory primitive for user memory.
 ///
 /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
-/// execute Conv2D, we need memory primitive for I and F. Buf if reorder is
+/// execute Conv2D, we need memory primitive for I and F. But if reorder is
 /// required for I and F (say I_r is reorder primitive for I; F_r is reorder
 /// primitive for F), then we need I_r and F_r to perform Conv2D.
 inline const memory& GetOpMem() const {
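
The reorder fallback described in this doc comment reduces to a two-way choice; a hedged sketch of the body (the member names are assumptions, not taken from this diff):

// Hand out the reordered memory primitive (I_r / F_r) when a reorder was
// set up, otherwise the user's original memory primitive (I / F).
inline const memory& GetOpMem() const {
  return reorder_memory_ ? *reorder_memory_ : *user_memory_;
}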
@@ -2010,7 +2010,7 @@ inline bool IsConv1x1StrideNot1(memory::dims filter_dims,
 } // namespace tensorflow
 /////////////////////////////////////////////////////////////////////
-// Macros for handling registeration for various types
+// Macros for handling registration for various types
 /////////////////////////////////////////////////////////////////////
 #define REGISTER_TEST_FLOAT32(TEST) REGISTER_TEST(TEST, DT_FLOAT, Float32Input);