Merge pull request #36230 from hjung4:spell

PiperOrigin-RevId: 292831055
Change-Id: I21f2505a902b62837781ef613c072f895fd7776a
TensorFlower Gardener 2020-02-02 18:06:01 -08:00
commit de45c4b295
9 changed files with 11 additions and 11 deletions


@@ -1569,7 +1569,7 @@ TEST_P(ModularFileSystemTest, TestRoundTrip) {
if (!status.ok())
GTEST_SKIP() << "NewRandomAccessFile() not supported: " << status;
- char scratch[64 /* big enough to accomodate test_data */] = {0};
+ char scratch[64 /* big enough to accommodate test_data */] = {0};
StringPiece result;
status = read_file->Read(0, test_data.size(), &result, scratch);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);


@@ -359,7 +359,7 @@ func @replication(%arg0: tensor<i1>, %arg1: tensor<i32>, %arg2: tensor<f32>) ->
// Test `tf.TPUReplicatedInput` ops are sorted by their `index` attribute.
- // Non-negative `index` should preceed `index` of -1, and ordering of ops with
+ // Non-negative `index` should precede `index` of -1, and ordering of ops with
// `index` of -1 does not matter.
// CHECK-LABEL: func @sort_replicated_input
// CHECK-SAME: (%[[ARG_0:.*]]: tensor<i1>, %[[ARG_1:.*]]: tensor<i1>, %[[ARG_2:.*]]: tensor<i1>, %[[ARG_3:.*]]: tensor<i1>, %[[ARG_4:.*]]: tensor<i1>, %[[ARG_5:.*]]: tensor<i1>)


@@ -33,7 +33,7 @@ namespace {
// MatMul function is defined as: c = alpha * op(a) * op(b) + beta * c.
// Since XLA MatMul does not used alpha, beta, we set them to 1.0 and 0.0.
- // Matrix lhs, rhs and out are all colum-major.
+ // Matrix lhs, rhs and out are all column-major.
void MatMulF32(const void* run_options_ptr, float* out, float* lhs, float* rhs,
int64 m, int64 n, int64 k, int32 transpose_lhs,
int32 transpose_rhs) {
@@ -55,7 +55,7 @@ void MatMulF32(const void* run_options_ptr, float* out, float* lhs, float* rhs,
// MatMul function is defined as: c = alpha * op(a) * op(b) + beta * c.
// Since XLA MatMul does not used alpha, beta, we set them to 1.0 and 0.0.
- // Matrix lhs, rhs and out are all colum-major.
+ // Matrix lhs, rhs and out are all column-major.
void MatMulF64(const void* run_options_ptr, double* out, double* lhs,
double* rhs, int64 m, int64 n, int64 k, int32 transpose_lhs,
int32 transpose_rhs) {
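For reference, the comment touched by both hunks defines MatMul as c = alpha * op(a) * op(b) + beta * c; since alpha is fixed at 1.0 and beta at 0.0, this collapses to c = op(a) * op(b) over column-major buffers. A minimal sketch of that computation in plain C++ (NaiveColumnMajorMatMul is a hypothetical helper name for illustration, not one of the XLA runtime symbols above):

#include <cstdint>

// Computes out = op(lhs) * op(rhs), where op() optionally transposes its
// argument; out is m x n, op(lhs) is m x k, op(rhs) is k x n, all column-major.
void NaiveColumnMajorMatMul(const float* lhs, const float* rhs, float* out,
                            int64_t m, int64_t n, int64_t k,
                            bool transpose_lhs, bool transpose_rhs) {
  for (int64_t col = 0; col < n; ++col) {
    for (int64_t row = 0; row < m; ++row) {
      float acc = 0.0f;  // beta == 0.0: prior contents of out are ignored.
      for (int64_t i = 0; i < k; ++i) {
        // Column-major: element (r, c) of an R x C matrix lives at r + c * R.
        const float a = transpose_lhs ? lhs[i + row * k] : lhs[row + i * m];
        const float b = transpose_rhs ? rhs[col + i * n] : rhs[i + col * k];
        acc += a * b;  // alpha == 1.0: no scaling of the product.
      }
      out[row + col * m] = acc;
    }
  }
}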


@@ -883,7 +883,7 @@ REGISTER_OP("_MklQuantizedMatMulWithBias")
REGISTER_OP("_MklQuantizedMatMulWithBiasAndRelu")
.Input("a: T1")
.Input("b: T2")
- // TODO(intel-tf): Modify bias type as Tbias and add relevent attribute.
+ // TODO(intel-tf): Modify bias type as Tbias and add relevant attribute.
.Input("bias: float")
.Input("min_a: float")
.Input("max_a: float")


@@ -251,7 +251,7 @@ service EagerService {
// Contexts are always created with a deadline and no RPCs within a deadline
// will trigger a context garbage collection. KeepAlive calls can be used to
- // delay this. It can also be used to validate the existance of a context ID
+ // delay this. It can also be used to validate the existence of a context ID
// on remote eager worker. If the context is on remote worker, return the same
// ID and the current context view ID. This is useful for checking if the
// remote worker (potentially with the same task name and hostname / port) is


@@ -156,7 +156,7 @@ TEST(PrepackedCacheTest, TestCacheOnCacheable) {
dst.data = dst_data;
ruy::BasicSpec<float, float> spec;
- // Perform the multiplication and confirm no caching occured.
+ // Perform the multiplication and confirm no caching occurred.
ruy::Mul<ruy::kAllPaths>(lhs, rhs, spec, &context, &dst);
EXPECT_EQ(cache->TotalSize(), 0);


@@ -107,7 +107,7 @@ class BucketBySequenceLengthTest(test_base.DatasetTestBase,
# Calculate the expected occurrence of individual batch sizes.
expected_batch_sizes[length] = \
[batch_size] * (bucket_elements // batch_size)
- # Calculate the expected occurence of individual sequence lengths.
+ # Calculate the expected occurrence of individual sequence lengths.
expected_lengths.extend([length] * (bucket_elements // batch_size))
def build_dataset(sparse):


@@ -366,7 +366,7 @@ class Network(base_layer.Layer):
@property
def _layer_checkpoint_dependencies(self):
"""Dictionary of layer dependencies to be included in the checkpoint."""
- # Use getattr becuase this function can be called from __setattr__, at which
+ # Use getattr because this function can be called from __setattr__, at which
# point the _is_graph_network attribute has not been created.
if (not getattr(self, '_is_graph_network', False) and
base_layer_utils.is_subclassed(self)):


@@ -434,8 +434,8 @@ class ListWrapper(
@_non_append_mutation.setter
def _non_append_mutation(self, value):
- # Trackable only cares that a mutation occured at some point; when
- # attempting to save it checks whether a mutation occured and the object is
+ # Trackable only cares that a mutation occurred at some point; when
+ # attempting to save it checks whether a mutation occurred and the object is
# in a "dirty" state but otherwise the specifics of how it got to that state
# are ignored. By contrast, the attribute cache needs to signal the mutation
# immediately since a caller could query the value of an attribute (And