Patch/170506 misspell (#9725)

* Fix misspells.

* Fix misspells.

* Fix misspells.
Chris Hoyean Song 2017-05-07 03:02:54 +09:00 committed by Vijay Vasudevan
parent b981c5d1c4
commit 4be052a5fc
47 changed files with 61 additions and 61 deletions

View File

@ -40,7 +40,7 @@ TensorFlow coding style.
* Include unit tests when you contribute new features, as they help to
a) prove that your code works correctly, b) guard against future breaking
changes to lower the maintenance cost.
- * Bug fixes also generally require unit tests, because the presense of bugs
+ * Bug fixes also generally require unit tests, because the presence of bugs
usually indicates insufficient test coverage.
* Keep API compatibility in mind when you change code in core TensorFlow,
e.g., code in [tensorflow/core](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core) and [tensorflow/python](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python).

View File

@ -115,7 +115,7 @@ class OperatorPDIdentity(operator_pd.OperatorPDBase):
"""Static check that the argument `x` is proper `shape`, `dtype`."""
# x is a typical argument e.g. to matmul or solve. In both cases, x should
# have the same type/shape since this is a square matrix. These checks are
- # ususally not needed since we ususally have some tensor backing this
+ # usually not needed since we usually have some tensor backing this
# distribution, and the calls to tf.matmul do a shape/type check.
#
# Static checks only for efficiency, the identity should be fast.

View File

@ -41,7 +41,7 @@ def load_data(path='imdb.npz',
num_words: max number of words to include. Words are ranked
by how often they occur (in the training set) and only
the most frequent words are kept
- skip_top: skip the top N most frequently occuring words
+ skip_top: skip the top N most frequently occurring words
(which may not be informative).
maxlen: truncate sequences after this length.
seed: random seed for sample shuffling.
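For reference, the arguments in this docstring combine as in the following minimal sketch; it assumes the standalone Keras package layout (`keras.datasets.imdb`), which exposes the same `load_data()` signature.

# Hedged usage sketch: keep the 10,000 most frequent words but skip the
# 20 most common ones, which are rarely informative.
from keras.datasets import imdb

(x_train, y_train), (x_test, y_test) = imdb.load_data(
    num_words=10000, skip_top=20, maxlen=None, seed=113)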

View File

@ -43,7 +43,7 @@ def load_data(path='reuters.npz',
num_words: max number of words to include. Words are ranked
by how often they occur (in the training set) and only
the most frequent words are kept
- skip_top: skip the top N most frequently occuring words
+ skip_top: skip the top N most frequently occurring words
(which may not be informative).
maxlen: truncate sequences after this length.
test_split: Fraction of the dataset to be used as test data.

View File

@ -649,7 +649,7 @@ class Layer(tf_base_layers.Layer):
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask
return None
- # if masking is explictly supported, by default
+ # if masking is explicitly supported, by default
# carry over the input mask
return mask

View File

@ -245,7 +245,7 @@ def _check_array_lengths(inputs, targets, weights):
def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
"""Does validation on the compatiblity of targets and loss functions.
"""Does validation on the compatibility of targets and loss functions.
This helps prevent users from using loss functions incorrectly.

View File

@ -169,7 +169,7 @@ class BatchNormalization(Layer):
def normalize_inference():
if needs_broadcasting:
- # In this case we must explictly broadcast all parameters.
+ # In this case we must explicitly broadcast all parameters.
broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)
broadcast_moving_variance = K.reshape(self.moving_variance,
broadcast_shape)

View File

@ -221,7 +221,7 @@ def load_model(filepath, custom_objects=None):
obj: object, dict, or list.
Returns:
- The same structure, where occurences
+ The same structure, where occurrences
of a custom object name have been replaced
with the custom object.
"""

View File

@ -156,7 +156,7 @@ def skipgrams(sequence,
of word indices (integers). If using a `sampling_table`,
word indices are expected to match the rank
of the words in a reference dataset (e.g. 10 would encode
- the 10-th most frequently occuring token).
+ the 10-th most frequently occurring token).
Note that index 0 is expected to be a non-word and will be skipped.
vocabulary_size: int. maximum possible word index + 1
window_size: int. actually half-window.
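As an aside, a minimal sketch of how these arguments fit together, assuming the standalone Keras layout (`keras.preprocessing.sequence.skipgrams`):

# Hedged example: generate (target, context) pairs from a short sequence of
# word indices, with one negative sample per positive pair.
from keras.preprocessing.sequence import skipgrams

sequence = [2, 7, 4, 9, 3]  # word indices; index 0 is reserved for non-words
couples, labels = skipgrams(sequence, vocabulary_size=10,
                            window_size=2, negative_samples=1.0)
# couples: list of [target, context] index pairs
# labels: 1 for true context pairs, 0 for negative samples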

View File

@ -810,7 +810,7 @@ def axis_order_scope(axis_order=None):
Example usage:
with lt.axis_order_scope(['x', 'y', 'z']):
- # result is guranteed to have the correct axis order
+ # result is guaranteed to have the correct axis order
result = w + b
You can nest scopes, in which case only the inner-most scope applies, e.g.,

View File

@ -451,7 +451,7 @@ class _SparseColumn(
return input_tensor
def is_compatible(self, other_column):
"""Check compatability of two sparse columns."""
"""Check compatibility of two sparse columns."""
if self.lookup_config and other_column.lookup_config:
return self.lookup_config == other_column.lookup_config
compatible = (self.length == other_column.length and
@ -2069,7 +2069,7 @@ class _CrossedColumn(_FeatureColumn,
"hash_key",
"combiner", "ckpt_to_load_from",
"tensor_name_in_ckpt"])):
"""Represents a cross transformation also known as conjuction or combination.
"""Represents a cross transformation also known as conjunction or combination.
Instances of this class are immutable. It crosses given `columns`. Crossed
column output will be hashed to hash_bucket_size.

View File

@ -1467,8 +1467,8 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
expected_input_shape = [4, 3, embedding_dimension]
self.assertAllEqual(expected_input_shape, model_input.shape)
- # `ids_tensor` consists of 7 instances of <empty>, 3 occurences of "b",
- # 2 occurences of "c" and 1 instance of "a".
+ # `ids_tensor` consists of 7 instances of <empty>, 3 occurrences of "b",
+ # 2 occurrences of "c" and 1 instance of "a".
expected_gradient_values = sorted([0., 3., 2., 1.] * embedding_dimension)
actual_gradient_values = np.sort(gradients[0].values, axis=None)
self.assertAllClose(expected_gradient_values, actual_gradient_values)

View File

@ -173,7 +173,7 @@ class FeatureColumnTest(test.TestCase):
for i in range(len(b1_value)):
self.assertAllClose(b1_value[i], b2_value[i])
- # Test the case when a shared_embedding_name is explictly specified.
+ # Test the case when a shared_embedding_name is explicitly specified.
d = fc.shared_embedding_columns(
[a1, a2],
dimension=4,

View File

@ -278,7 +278,7 @@ def _fused_batch_norm(
trainable=trainable_gamma)
# Create moving_mean and moving_variance variables and add them to the
- # appropiate collections.
+ # appropriate collections.
moving_mean_collections = utils.get_variable_collections(
variables_collections, 'moving_mean')
moving_mean_initializer = param_initializers.get(
@ -632,7 +632,7 @@ def batch_norm(inputs,
trainable=trainable)
# Create moving_mean and moving_variance variables and add them to the
- # appropiate collections. We disable variable partitioning while creating
+ # appropriate collections. We disable variable partitioning while creating
# them, because assign_moving_average is not yet supported for partitioned
# variables.
partitioner = variable_scope.get_variable_scope().partitioner

View File

@ -115,7 +115,7 @@ def dict_to_state_tuple(input_dict, cell):
def _concatenate_context_input(sequence_input, context_input):
"""Replicates `context_input` accross all timesteps of `sequence_input`.
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
@ -177,7 +177,7 @@ def build_sequence_input(features,
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
- describing context features i.e. features that apply accross all time
+ describing context features i.e. features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
weight_collections: List of graph collections to which weights are added.
@ -419,7 +419,7 @@ def _get_dynamic_rnn_model_fn(
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
- describing context features, i.e., features that apply accross all time
+ describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
predict_probabilities: A boolean indicating whether to predict probabilities
@ -603,7 +603,7 @@ class DynamicRnnEstimator(estimator.Estimator):
describing sequence features. All items in the iterable should be
instances of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
- describing context features, i.e., features that apply accross all time
+ describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
num_classes: the number of classes for a classification problem. Only

View File

@ -163,7 +163,7 @@ class Head(object):
ModeFnOps.loss to compute and apply gradients.
logits: logits `Tensor` to be used by the head.
logits_input: `Tensor` from which to build logits, often needed when you
- don't want to compute the logits. Typicaly this is the activation of the
+ don't want to compute the logits. Typically this is the activation of the
last hidden layer in a DNN. Some heads (like the ones responsible for
candidate sampling) intrinsically avoid computing full logits and only
accepts logits_input.

View File

@ -895,7 +895,7 @@ class BinaryClassificationHeadTest(test.TestCase):
_assert_summary_tags(self, ["loss"])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
- # expected_loss is (total_weighted_loss)/1 since htere is 1 nonzero
+ # expected_loss is (total_weighted_loss)/1 since there is 1 nonzero
# weight.
expected_loss = 0.062652342
_assert_metrics(

View File

@ -144,7 +144,7 @@ def _prepare_features_for_sqss(features, labels, mode,
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
- describing context features, i.e., features that apply accross all time
+ describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
@ -261,7 +261,7 @@ def _read_batch(cell,
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
- describing context features, i.e., features that apply accross all time
+ describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
num_threads: The Python integer number of threads enqueuing input examples
@ -420,7 +420,7 @@ def _get_rnn_model_fn(cell_type,
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
- describing context features, i.e., features that apply accross all time
+ describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
predict_probabilities: A boolean indicating whether to predict probabilities
@ -563,7 +563,7 @@ class StateSavingRnnEstimator(estimator.Estimator):
describing sequence features. All items in the set should be instances
of classes derived from `FeatureColumn`.
context_feature_columns: An iterable containing all the feature columns
- describing context features, i.e., features that apply accross all time
+ describing context features, i.e., features that apply across all time
steps. All items in the set should be instances of classes derived from
`FeatureColumn`.
num_classes: The number of classes for categorization. Used only and

View File

@ -473,7 +473,7 @@ class LoggingTrainable(EveryN):
def every_n_step_begin(self, step):
super(LoggingTrainable, self).every_n_step_begin(step)
- # Get a list of trainable variables at the begining of every N steps.
+ # Get a list of trainable variables at the beginning of every N steps.
# We cannot get this in __init__ because train_op has not been generated.
trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
scope=self._scope)

View File

@ -295,7 +295,7 @@ itself, you'll see it's broken up into host and target sections. If you are
cross-compiling, you should look at customizing the target settings to match
what you need for your desired system.
- ## Dependency Managment
+ ## Dependency Management
The Makefile loads in a list of dependencies stored in text files. These files
are generated from the main Bazel build by running

View File

@ -97,7 +97,7 @@ def Export(export_dir, use_checkpoint_v2):
}
# Create two filename assets and corresponding tensors.
- # TODO(b/26254158) Consider adding validation of file existance as well as
+ # TODO(b/26254158) Consider adding validation of file existence as well as
# hashes (e.g. sha1) for consistency.
original_filename1 = tf.constant("hello1.txt")
tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)

View File

@ -840,7 +840,7 @@ class TrainTest(test.TestCase):
# Initialize the variables.
sess.run(variables_lib.global_variables_initializer())
- # Get the intial weights and biases values.
+ # Get the initial weights and biases values.
weights_values, biases_values = sess.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)

View File

@ -159,7 +159,7 @@ class SparsemaxLossTest(test.TestCase):
self.assertShapeEqual(q, tf_sparsemax_op)
def _test_gradient_against_estimate(self, dtype, random, use_gpu):
"""check sparsemax-loss Rop, aginst estimated-loss Rop"""
"""check sparsemax-loss Rop, against estimated-loss Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
q = np.zeros((test_obs, 10)).astype(dtype)
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
@ -178,7 +178,7 @@ class SparsemaxLossTest(test.TestCase):
self.assertLess(err, 1e-4)
def _test_gradient_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax-loss Rop, aginst numpy Rop"""
"""check sparsemax-loss Rop, against numpy Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
q = np.zeros((test_obs, 10))
q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1

View File

@ -188,7 +188,7 @@ class SparsemaxTest(test.TestCase):
self.assertShapeEqual(z, tf_sparsemax_op)
def _test_gradient_against_estimate(self, dtype, random, use_gpu):
"""check sparsemax Rop, aginst estimated Rop"""
"""check sparsemax Rop, against estimated Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
logits = array_ops.placeholder(dtype, name='z')
@ -204,7 +204,7 @@ class SparsemaxTest(test.TestCase):
self.assertLess(err, 1e-4)
def _test_gradient_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax Rop, aginst numpy Rop"""
"""check sparsemax Rop, against numpy Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
logits = constant_op.constant(z, name='z')

View File

@ -79,7 +79,7 @@ class TensorForestLossHook(session_run_hook.SessionRunHook):
current_loss = run_values.results['current_loss']
current_step = run_values.results['global_step']
self.steps += 1
- # Gaurd against the global step going backwards, which might happen
+ # Guard against the global step going backwards, which might happen
# if we recover from something.
if self.last_step == -1 or self.last_step > current_step:
logging.info('TensorForestLossHook resetting last_step.')

View File

@ -254,7 +254,7 @@ def checkpoints_iterator(checkpoint_dir,
logging.info('Timed-out waiting for a checkpoint.')
return
if timeout_fn():
- # The timeout_fn indicated that we are truely done.
+ # The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
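For context, a hedged sketch of the `timeout_fn` contract described here, assuming the iterator is exposed as `tf.contrib.training.checkpoints_iterator` in this TF 1.x timeframe; the helper functions are hypothetical:

import tensorflow as tf

def should_stop():
    # Hypothetical check, e.g. a flag file written by the trainer.
    # Returning True means "truly done"; False means keep waiting.
    return trainer_has_finished()

for ckpt_path in tf.contrib.training.checkpoints_iterator(
        '/tmp/model_dir', timeout=60, timeout_fn=should_stop):
    evaluate_checkpoint(ckpt_path)  # hypothetical evaluation routine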

View File

@ -18,7 +18,7 @@
This helper handles the plumbing in order to set up a feeder task to
push generated inputs to a pool of remote consumers; or to run an
- identical feeding mechanism in a seperate thread in the same process.
+ identical feeding mechanism in a separate thread in the same process.
Example usage for distributed feeding:
@ -331,7 +331,7 @@ class Feeder(object):
they never close their queue. Second, they are added to the
`Feeder.REMOTE_QUEUE_RUNNERS` collection, rather than
`ops.GraphKeys.QUEUE_RUNNERS`, so they can be started/stopped
- seperately.
+ separately.
Args:
queue: The queue.

View File

@ -156,7 +156,7 @@ class FeederTest(test.TestCase):
coord.join()
def testFeederSeparateThread(self):
- # Start a feeder on a seperate thread, but with a shared local queue
+ # Start a feeder on a separate thread, but with a shared local queue
servers = self._create_local_cluster(worker=1)
coord = coordinator.Coordinator()
feed_thread = FeederThread(self, coord, servers, 'worker', 0)

View File

@ -164,7 +164,7 @@ class HParams(object):
import argparse
parser = argparse.ArgumentParser(description='Train my model.')
parser.add_argument('--hparams', type=str,
- help='Comma seperated list of "name=value" pairs.')
+ help='Comma separated list of "name=value" pairs.')
args = parser.parse_args()
...
def my_program():
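Continuing the docstring's example, a minimal sketch of the round trip, assuming the class lives at `tf.contrib.training.HParams` (its usual home in this era):

import tensorflow as tf

# Defaults declared up front; the comma-separated --hparams string overrides them.
hparams = tf.contrib.training.HParams(learning_rate=0.1, num_layers=2)
hparams.parse('learning_rate=0.3,num_layers=5')  # e.g. the value of args.hparams
print(hparams.learning_rate)  # 0.3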

View File

@ -508,7 +508,7 @@ class TrainTest(test.TestCase):
# Initialize the variables.
session.run(variables_lib2.global_variables_initializer())
- # Get the intial weights and biases values.
+ # Get the initial weights and biases values.
weights_values, biases_values = session.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)

View File

@ -44,7 +44,7 @@ class VisitableAllocator : public Allocator {
};
// Needed for cases when a VisitableAllocator gets wrapped for tracking.
- // Multiple-inheritance is considered acceptible in this case because
+ // Multiple-inheritance is considered acceptable in this case because
// VisitableAllocator is a pure virtual interface and only TrackingAllocator
// has default implementation.
class TrackingVisitableAllocator : public TrackingAllocator,

View File

@ -106,7 +106,7 @@ As an example, these steps will create a Maven project that uses TensorFlow:
The preceding command should output <tt>Hello from <i>version</i></tt>. If it
- does, you've succesfully set up TensorFlow for Java and are ready to use it in
+ does, you've successfully set up TensorFlow for Java and are ready to use it in
Maven projects. If not, check
[Stack Overflow](http://stackoverflow.com/questions/tagged/tensorflow)
for possible solutions. You can skip reading the rest of this document.

View File

@ -648,7 +648,7 @@ class CommandHandlerRegistry(object):
3) the handler is found for the prefix, but it fails to return a
RichTextLines or raise any exception.
CommandLineExit:
- If the command handler raises this type of exception, tihs method will
+ If the command handler raises this type of exception, this method will
simply pass it along.
"""
if not prefix:

View File

@ -55,7 +55,7 @@ class ProfileDatum(object):
@property
def exec_time(self):
"""Measures compute function exection time plus pre- and post-processing."""
"""Measures compute function execution time plus pre- and post-processing."""
return self.node_exec_stats.all_end_rel_micros

View File

@ -975,7 +975,7 @@ class DebugDumpDir(object):
slot = datum.output_slot
# In some cases (e.g., system clocks with insufficient precision),
# the upstream and downstream tensors may have identical timestamps, the
- # following check examines this possibilty and avoids raising an error if
+ # following check examines this possibility and avoids raising an error if
# that is the case.
if not self._satisfied_at_timestamp(
pending_inputs[node], datum.timestamp, start_i=i + 1):

View File

@ -66,7 +66,7 @@ class LocalCLIDebugHook(session_run_hook.SessionRunHook,
"""Add a tensor filter.
See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
- Override default behavior to accomodate the possibility of this method being
+ Override default behavior to accommodate the possibility of this method being
called prior to the initialization of the underlying
`LocalCLIDebugWrapperSession` object.
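For reference, a short hedged sketch of the deferred registration this docstring describes, using the standard tfdbg import alias and the built-in `has_inf_or_nan` filter:

from tensorflow.python import debug as tf_debug

hook = tf_debug.LocalCLIDebugHook()
# Safe to call before the wrapped session exists; the filter is applied
# once the underlying LocalCLIDebugWrapperSession is created.
hook.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)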

View File

@ -276,7 +276,7 @@ def subscribe(tensors, side_effects):
Subscribed tensors, which are identity copies of the passed in tensors
in the same passed in structure, but the graph has been modified
such that these are downstream of the control dependencies for
- the side effect graphs. Use these functionally equivelant tensors
+ the side effect graphs. Use these functionally equivalent tensors
instead of the passed in tensors for further construction or running.
"""
if not hasattr(side_effects, '__iter__'):
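As an illustration only (not part of this diff), a sketch of the contract described above, assuming the internal module `tensorflow.python.framework.subscribe` and a callback that returns the side-effect op(s):

import tensorflow as tf
from tensorflow.python.framework import subscribe

a = tf.constant(3.0)
# The returned tensor is an identity of `a` gated on the Print side effect.
a_sub = subscribe.subscribe(a, lambda t: [tf.Print(t, [t], 'a = ').op])
b = a_sub * 2.0  # use the subscribed tensor for further graph construction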

View File

@ -250,7 +250,7 @@ class TensorFlowTestCase(googletest.TestCase):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during in a test, it will return the
- same folder. However, accross different runs the directories will be
+ same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each others environment.
If you need multiple unique directories within a single test, you should

View File

@ -28,7 +28,7 @@ from tensorflow.python.platform import test
def _random_pd_matrix(n, rng):
"""Random postive definite matrix."""
"""Random positive definite matrix."""
temp = rng.randn(n, n)
return temp.dot(temp.T)

View File

@ -322,7 +322,7 @@ class BatchNormalization(base.Layer):
def _broadcast(v):
if needs_broadcasting and v is not None:
- # In this case we must explictly broadcast all parameters.
+ # In this case we must explicitly broadcast all parameters.
return array_ops.reshape(v, broadcast_shape)
return v

View File

@ -2331,7 +2331,7 @@ def tensordot(a, b, axes, name=None):
using `array_ops.transpose` and `array_ops.reshape`. The method takes a
tensor and performs the correct transpose and reshape operation for a given
set of indices. It returns the reshaped tensor as well as a list of indices
- necesary to reshape the tensor again after matrix multiplication.
+ necessary to reshape the tensor again after matrix multiplication.
Args:
a: `Tensor`.
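For concreteness, a small worked example of the contraction this helper enables; `axes=1` contracts the last axis of `a` with the first axis of `b`, which reduces to a plain matrix multiplication:

import numpy as np
import tensorflow as tf

a = tf.constant(np.arange(6.0).reshape(2, 3))
b = tf.constant(np.arange(12.0).reshape(3, 4))
c = tf.tensordot(a, b, axes=1)  # equivalent to tf.matmul(a, b); shape (2, 4)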

View File

@ -1735,7 +1735,7 @@ def _streaming_sparse_true_positive_at_k(labels,
A tuple of `Variable` and update `Operation`.
Raises:
- ValueError: If `weights` is not `None` and has an incomptable shape.
+ ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(
name, _at_k_name('true_positive', k, class_id=class_id),
@ -1831,7 +1831,7 @@ def _streaming_sparse_false_negative_at_k(labels,
A tuple of `Variable` and update `Operation`.
Raises:
- ValueError: If `weights` is not `None` and has an incomptable shape.
+ ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(
name, _at_k_name('false_negative', k, class_id=class_id),
@ -2653,7 +2653,7 @@ def _streaming_sparse_false_positive_at_k(labels,
A tuple of `Variable` and update `Operation`.
Raises:
- ValueError: If `weights` is not `None` and has an incomptable shape.
+ ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(
name, _at_k_name('false_positive', k, class_id=class_id),

View File

@ -58,7 +58,7 @@ class SparseFeature(
["index_key", "value_key", "dtype", "size", "already_sorted"])):
"""Configuration for parsing a sparse input feature from an `Example`.
- Note, preferrably use `VarLenFeature` (possibly in combination with a
+ Note, preferably use `VarLenFeature` (possibly in combination with a
`SequenceExample`) in order to parse out `SparseTensor`s instead of
`SparseFeature` due to its simplicity.
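A minimal sketch of the recommended alternative, parsing a variable-length feature with `VarLenFeature` so a `SparseTensor` comes back directly (the feature name and input batch are hypothetical):

import tensorflow as tf

features = tf.parse_example(
    serialized_examples,  # assumed: a batch of serialized tf.Example protos
    features={'tags': tf.VarLenFeature(tf.string)})
tags = features['tags']   # a SparseTensor of strings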

View File

@ -366,7 +366,7 @@ class Coordinator(object):
# If any thread is still alive, wait for the grace period to expire.
# By the time this check is executed, threads may still be shutting down,
# so we add a sleep of increasing duration to give them a chance to shut
- # down without loosing too many cycles.
+ # down without losing too many cycles.
# The sleep duration is limited to the remaining grace duration.
stop_wait_secs = 0.001
while any(t.is_alive() for t in threads) and stop_grace_period_secs >= 0.0:

View File

@ -182,7 +182,7 @@ def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples):
return d
def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
"""Builds a dictionary from deprecated arguments to thier spec.
"""Builds a dictionary from deprecated arguments to their spec.
Returned dict is keyed by argument name.
Each value is a DeprecatedArgSpec with the following fields:

View File

@ -135,7 +135,7 @@ class StatusOr {
// operators, to support move-only types and avoid unnecessary copying.
StatusOr(T&& value); // NOLINT
- // Move conversion operator to avoid unecessary copy.
+ // Move conversion operator to avoid unnecessary copy.
// T must be assignable from U.
// Not marked with explicit so the implicit conversion can happen.
template <typename U>

View File

@ -34,7 +34,7 @@ def md_files_in_dir(py_guide_src_dir):
class PyGuideParser(object):
"""Simple parsing of a guide .md file.
- Decendents can override the process_*() functions (called by process())
+ Descendants can override the process_*() functions (called by process())
to either record infromation from the guide, or call replace_line()
to affect the return value of process().
"""