Contributing: fix typos

Maher Jendoubi 2020-01-26 13:47:00 +01:00
parent c1c494b07e
commit 215dab52c6
15 changed files with 17 additions and 17 deletions

View File

@@ -27,7 +27,7 @@ extern "C" {
// creating a new op every time. If `raw_device_name` is `NULL` or empty, it
// does not set the device name. If it's not `NULL`, then it attempts to parse
// and set the device name. It's effectively `TFE_OpSetDevice`, but it is faster
-// than seperately calling it because if the existing op has the same
+// than separately calling it because if the existing op has the same
// `raw_device_name`, it skips parsing and just leave as it is.
TF_CAPI_EXPORT extern void TFE_OpReset(TFE_Op* op_to_reset,
const char* op_or_function_name,

View File

@@ -1569,7 +1569,7 @@ TEST_P(ModularFileSystemTest, TestRoundTrip) {
if (!status.ok())
GTEST_SKIP() << "NewRandomAccessFile() not supported: " << status;
-char scratch[64 /* big enough to accomodate test_data */] = {0};
+char scratch[64 /* big enough to accommodate test_data */] = {0};
StringPiece result;
status = read_file->Read(0, test_data.size(), &result, scratch);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);

View File

@@ -937,7 +937,7 @@ class ConvertFusedBatchNormGradBase
// Gets the result values.
Value x_backprop, scale_backprop, offset_backprop;
if (op.is_training()) { // training
-// TODO(b/145536565): handle GPU logic seperately.
+// TODO(b/145536565): handle GPU logic separately.
// Infers the output type with the converted `act`.
Type feature_type = RankedTensorType::get(
{GetDimSize(act_type, feature_dim)}, kernel_type);

View File

@@ -405,7 +405,7 @@ Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,
// the latest occurrence of a ReaderDataset (e.g. CSVDataset, TFRecordDataset,
// etc...). We then add a shard after that dataset to shard the outputs of
// that dataset, in effect giving a piece to each worker. Finally, we remove
-// occurences from randomness from before that point in the graph (e.g. things
+// occurrences from randomness from before that point in the graph (e.g. things
// like ShuffleDataset) to ensure that `shard` returns a sensible result.
switch (policy) {
case AutoShardPolicy::OFF:
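
In tf.data terms, the rewrite this comment describes amounts to hand-placing a `shard` immediately after the reader. A minimal Python sketch, with hypothetical file names and worker layout:

import tensorflow as tf

# Hypothetical placeholders: two input files, two workers, this is worker 0.
files = ["data-00000-of-00002.tfrecord", "data-00001-of-00002.tfrecord"]
num_workers, index = 2, 0

dataset = tf.data.TFRecordDataset(files)     # the ReaderDataset found by walking up
dataset = dataset.shard(num_workers, index)  # shard added right after that dataset
dataset = dataset.batch(32)
# Any ShuffleDataset before the shard point would be removed so that `shard`
# hands each worker a sensible, disjoint piece of the input.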

View File

@@ -248,7 +248,7 @@ service EagerService {
// Contexts are always created with a deadline and no RPCs within a deadline
// will trigger a context garbage collection. KeepAlive calls can be used to
-// delay this. It can also be used to validate the existance of a context ID
+// delay this. It can also be used to validate the existence of a context ID
// on remote eager worker. If the context is on remote worker, return the same
// ID and the current context view ID. This is useful for checking if the
// remote worker (potentially with the same task name and hostname / port) is

View File

@@ -156,7 +156,7 @@ TEST(PrepackedCacheTest, TestCacheOnCacheable) {
dst.data = dst_data;
ruy::BasicSpec<float, float> spec;
-// Perform the multiplication and confirm no caching occured.
+// Perform the multiplication and confirm no caching occurred.
ruy::Mul<ruy::kAllPaths>(lhs, rhs, spec, &context, &dst);
EXPECT_EQ(cache->TotalSize(), 0);

View File

@@ -41,7 +41,7 @@ def replace_includes(line, supplied_headers_list):
def replace_main(line):
"""Updates any occurences of a bare main definition to the Arduino equivalent."""
"""Updates any occurrences of a bare main definition to the Arduino equivalent."""
main_match = re.match(r'(.*int )(main)(\(.*)', line)
if main_match:
line = main_match.group(1) + 'tflite_micro_main' + main_match.group(3)
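
For reference, the regex above rewrites a bare C++ main definition in place. A self-contained sketch of the behavior (the trailing `return line` is assumed; the hunk cuts off before the end of the function):

import re

def replace_main(line):
  """Updates any occurrences of a bare main definition to the Arduino equivalent."""
  main_match = re.match(r'(.*int )(main)(\(.*)', line)
  if main_match:
    line = main_match.group(1) + 'tflite_micro_main' + main_match.group(3)
  return line  # assumed: not shown in the truncated hunk

print(replace_main('int main(int argc, char** argv) {'))
# -> int tflite_micro_main(int argc, char** argv) {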

View File

@@ -48,7 +48,7 @@ def replace_arduino_includes(line, supplied_headers_list):
def replace_arduino_main(line):
"""Updates any occurences of a bare main definition to the Arduino equivalent."""
"""Updates any occurrences of a bare main definition to the Arduino equivalent."""
main_match = re.match(r'(.*int )(main)(\(.*)', line)
if main_match:
line = main_match.group(1) + 'tflite_micro_main' + main_match.group(3)

View File

@@ -32,7 +32,7 @@ std::vector<string> GetOperatorNames(const Model& model);
// Counts the number of different types of operators in the model:
// Built-in ops, custom ops and select ops.
// Each map is mapping from the name of the operator (such as 'Conv') to its
-// total number of occurences in the model.
+// total number of occurrences in the model.
void CountOperatorsByType(const Model& model,
std::map<string, int>* built_in_ops,
std::map<string, int>* custom_ops,

View File

@@ -107,7 +107,7 @@ class BucketBySequenceLengthTest(test_base.DatasetTestBase,
# Calculate the expected occurrence of individual batch sizes.
expected_batch_sizes[length] = \
[batch_size] * (bucket_elements // batch_size)
-# Calculate the expected occurence of individual sequence lengths.
+# Calculate the expected occurrence of individual sequence lengths.
expected_lengths.extend([length] * (bucket_elements // batch_size))
def build_dataset(sparse):
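
A quick worked instance of the two expectations above, with hypothetical numbers (a bucket of 32 elements batched 8 at a time gives four full batches):

bucket_elements, batch_size, length = 32, 8, 10  # hypothetical values
per_bucket_batches = bucket_elements // batch_size                   # 4
expected_batch_sizes_for_length = [batch_size] * per_bucket_batches  # [8, 8, 8, 8]
expected_lengths_for_bucket = [length] * per_bucket_batches          # [10, 10, 10, 10]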

View File

@@ -307,7 +307,7 @@ class _CategoricalEncodingCombiner(Combiner):
# Any newly created token counts in 'base_accumulator''s
# per_doc_count_dict will have a last_doc_id of -1. This is always
# less than the next doc id (which are strictly positive), so any
-# future occurences are guaranteed to be counted.
+# future occurrences are guaranteed to be counted.
base_accumulator.per_doc_count_dict[token]["count"] += value["count"]
return base_accumulator

View File

@@ -756,7 +756,7 @@ class _TextVectorizationCombiner(Combiner):
# Any newly created token counts in 'base_accumulator''s
# per_doc_count_dict will have a last_doc_id of -1. This is always
# less than the next doc id (which are strictly positive), so any
-# future occurences are guaranteed to be counted.
+# future occurrences are guaranteed to be counted.
base_accumulator.per_doc_count_dict[token]["count"] += value["count"]
return base_accumulator

View File

@@ -173,8 +173,8 @@ def _RGBToHSVGrad(op, grad):
This function is a piecewise continuous function as defined here:
https://en.wikipedia.org/wiki/HSL_and_HSV#From_RGB
-We perform the multi variate derivative and compute all partial derivates
-seperately before adding them in the end. Formulas are given before each
+We perform the multivariate derivative and compute all partial derivatives
+separately before adding them in the end. Formulas are given before each
partial derivative calculation.
Args:
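
To make "compute all partial derivatives separately before adding them" concrete: for each input channel the gradient sums one term per HSV output. For the red channel, in LaTeX,

\frac{\partial L}{\partial R}
  = \frac{\partial L}{\partial H}\frac{\partial H}{\partial R}
  + \frac{\partial L}{\partial S}\frac{\partial S}{\partial R}
  + \frac{\partial L}{\partial V}\frac{\partial V}{\partial R},
\qquad
\frac{\partial V}{\partial R} = \mathbf{1}[R = \max(R, G, B)]

since V = max(R, G, B); that indicator is one source of the piecewise-continuous behaviour noted above, and the H and S terms are piecewise in the same way.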

View File

@@ -1,3 +1,3 @@
-Client responsible for communicating the Cloud TPU API. Released seperately from tensorflow.
+Client responsible for communicating the Cloud TPU API. Released separately from tensorflow.
https://pypi.org/project/cloud-tpu-client/

View File

@@ -434,8 +434,8 @@ class ListWrapper(
@_non_append_mutation.setter
def _non_append_mutation(self, value):
-# Trackable only cares that a mutation occured at some point; when
-# attempting to save it checks whether a mutation occured and the object is
+# Trackable only cares that a mutation occurred at some point; when
+# attempting to save it checks whether a mutation occurred and the object is
# in a "dirty" state but otherwise the specifics of how it got to that state
# are ignored. By contrast, the attribute cache needs to signal the mutation
# immediately since a caller could query the value of an attribute (And