Merge pull request #11349 from chris-chris/patch/170707-misspell

Fix misspells :)
Commit 37d00efb0a, authored by Shanqing Cai (2017-07-07 10:31:49 -04:00) and committed by GitHub.
15 changed files with 18 additions and 18 deletions

@@ -440,7 +440,7 @@ Status ParallelCpuExecutable::ExecuteComputeFunctions(
// TODO(b/27458679) Manage scheduling based on in-flight concurrency limits.
// For example, if we expect a library conv/matmul call to run at max
// concurrency, we should not dispatch runnable instructions until the
-// libary call is finished (to avoid expensive cache invalidation).
+// library call is finished (to avoid expensive cache invalidation).
Executor executor(functions, run_options, &pending, &results,
buffer_pointers.data(), profile_counters.data(),
assignment_.get());

@@ -495,7 +495,7 @@ TEST_F(HloAliasAnalysisTest, NestedWhiles) {
};
// Build separate condition computations so the call graph is flat. The
// callgraph is always flattened in the compiler pipeline, and the flattened
-// callgraph enables representive interference analysis.
+// callgraph enables representative interference analysis.
HloComputation* condition1 =
module_->AddEmbeddedComputation(build_cond_computation());
HloComputation* condition2 =

@@ -106,7 +106,7 @@ void CategoricalFeatureColumnHandler::GenerateFeatureSplitCandidates(
NodeStats left_node_stats(learner_config, left_gradient_stats);
NodeStats right_node_stats(learner_config, right_gradient_stats);
-// Generate split candiate and update best split candidate for the
+// Generate split candidate and update best split candidate for the
// current root if needed.
FeatureSplitCandidate split_candidate(
slot_id_,

@@ -93,7 +93,7 @@ void DenseQuantizedFeatureColumnHandler::GenerateFeatureSplitCandidates(
NodeStats left_node_stats(learner_config, left_gradient_stats);
NodeStats right_node_stats(learner_config, right_gradient_stats);
-// Generate split candiate.
+// Generate split candidate.
const float threshold = dense_quantiles_(bucket_id);
FeatureSplitCandidate split_candidate(
slot_id_, CreateDenseSplitNode(dense_feature_column_, threshold),

@@ -109,7 +109,7 @@ void SparseQuantizedFeatureColumnHandler::GenerateFeatureSplitCandidates(
NodeStats left_node_stats(learner_config, left_gradient_stats);
NodeStats right_node_stats(learner_config, right_gradient_stats);
-// Generate split candiate.
+// Generate split candidate.
const float threshold = sparse_quantiles_(bucket_id);
FeatureSplitCandidate split_candidate(
slot_id_,
@@ -147,7 +147,7 @@ void SparseQuantizedFeatureColumnHandler::GenerateFeatureSplitCandidates(
NodeStats left_node_stats(learner_config, left_gradient_stats);
NodeStats right_node_stats(learner_config, right_gradient_stats);
-// Generate split candiate.
+// Generate split candidate.
const float threshold = sparse_quantiles_(bucket_id - 1);
FeatureSplitCandidate split_candidate(
slot_id_,

@@ -800,7 +800,7 @@ class WALSModel(object):
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
sum_weights: The sum of the weights corresponding to sp_input. This
-can be used with unregularized loss to caluclate the root weighted
+can be used with unregularized loss to calculate the root weighted
squared error.
"""
assert isinstance(sp_input, sparse_tensor.SparseTensor)
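A hedged sketch of how the docstring's two return values combine; the formula below is the standard weighted RMSE, and the exact expression used by WALSModel may differ:

```python
import math

# Hypothetical values of the two quantities the docstring describes.
unregularized_loss = 42.0  # weighted sum of squared errors for sp_input (assumed)
sum_weights = 128.0        # sum of the weights corresponding to sp_input

# Root weighted squared error: normalize the unregularized loss by the
# total weight, then take the square root.
rwse = math.sqrt(unregularized_loss / sum_weights)
print(rwse)
```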

@@ -689,11 +689,11 @@ class InputStatisticsFromMiniBatch(object):
values = features[TrainEvalFeatures.VALUES]
else:
# times and values may not be available, for example during prediction. We
-# still need to retreive our variables so that they can be read from, even
+# still need to retrieve our variables so that they can be read from, even
# if we're not going to update them.
times = None
values = None
-# Create/retreive variables representing input statistics, initialized
+# Create/retrieve variables representing input statistics, initialized
# without data to avoid deadlocking if variables are initialized before
# queue runners are started.
with variable_scope.variable_scope("input_statistics"):

@@ -196,7 +196,7 @@ class ChainingStateManager(_OverridableStateManager):
return time // self._state_saving_interval
def _get_cached_states(self, times):
"""Retreive cached states for a batch of times."""
"""Retrieve cached states for a batch of times."""
read_chunk_numbers = self._get_chunk_number(times)
looked_up_state = list(self._cached_states.lookup(
math_ops.cast(read_chunk_numbers, dtypes.int64)))
@@ -242,7 +242,7 @@ class ChainingStateManager(_OverridableStateManager):
# written to the next bucket). This assumes fixed missing times (i.e. if we
# were presented with times [10, 50] we will never see times [30, 50]).
#
-# TODO(allenl): Retreive the highest time less than the current time rather
+# TODO(allenl): Retrieve the highest time less than the current time rather
# than relying on fixed bucketing.
write_chunk_numbers = math_ops.maximum(
self._get_chunk_number(array_ops.concat(
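To make the chunk bucketing concrete, a minimal sketch under assumptions: plain Python stands in for tensor ops, the interval and times are invented, and the real write-side code takes a maximum whose arguments are truncated in this hunk:

```python
state_saving_interval = 20

def get_chunk_number(time):
    # Mirrors _get_chunk_number above: integer division buckets times.
    return time // state_saving_interval

times = [10, 50]
read_chunks = [get_chunk_number(t) for t in times]  # [0, 2]
# Per the comment, states are written one bucket ahead of where they were
# read so the next batch of times finds them (assumed simplification).
write_chunks = [c + 1 for c in read_chunks]         # [1, 3]
```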

@@ -150,7 +150,7 @@ class StateInterpolatingAnomalyDetector(FilteringStepPostprocessor):
This is simply Bayes' theorem, where p(data | anomaly) is the
alternative/anomaly distribution, p(data | not anomaly) is the model's
predicted distribution, and anomaly_prior_probability is the prior probability
-of an anomaly occuring (user-specified, defaulting to 1%).
+of an anomaly occurring (user-specified, defaulting to 1%).
Rather than computing p(anomaly | data) directly, we use the odds ratio:
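For reference, the odds-ratio form of Bayes' theorem this docstring describes is the standard one (writing $p_0$ for anomaly_prior_probability):

$$
\frac{p(\text{anomaly} \mid \text{data})}{p(\text{not anomaly} \mid \text{data})}
= \frac{p(\text{data} \mid \text{anomaly})}{p(\text{data} \mid \text{not anomaly})}
\cdot \frac{p_0}{1 - p_0}
$$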

@@ -70,7 +70,7 @@ class StructuralEnsemble(state_space_model.StateSpaceIndependentEnsemble):
`observation_noise`, `level_noise`, `trend noise`, `seasonality_noise`, and
`transient` are (typically scalar) Gaussian random variables whose variance is
-learned from data, and that variance is not time dependant in this
+learned from data, and that variance is not time dependent in this
implementation. Level noise is optional due to its similarity with observation
noise in some cases. Seasonality is enforced by constraining a full cycle of
seasonal variables to have zero expectation, allowing seasonality to adapt
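As a hedged reference, a standard local linear trend plus seasonality parameterization consistent with this description (not necessarily the exact transition model used in this class) is:

$$
\begin{aligned}
\text{level}_t &= \text{level}_{t-1} + \text{trend}_{t-1} + \text{level noise} \\
\text{trend}_t &= \text{trend}_{t-1} + \text{trend noise} \\
\text{observation}_t &= \text{level}_t + \text{seasonality}_t + \text{transient}_t + \text{observation noise}
\end{aligned}
$$

with a full cycle of the seasonal terms constrained to zero expectation, as the docstring notes.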

@@ -298,7 +298,7 @@ class InfeedQueue(object):
input_tensors is a list of lists of Tensors whose types and shapes are used
to set the queue configuration. The length of the outer list is the number
of shards required, and each inner list is the tuple of Tensors to use to
-determine the types and shapes of the correponding shard. This method
+determine the types and shapes of the corresponding shard. This method
depends on the shard dimension, and calling it freezes the shard policy.
Args:
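A minimal illustration of the nesting the docstring describes, with NumPy arrays standing in for Tensors (names and shapes are invented; only the list-of-lists structure matters):

```python
import numpy as np

# Stand-ins for Tensors; only their dtypes and shapes would matter here.
images_shard0 = np.zeros((8, 28, 28), np.float32)
labels_shard0 = np.zeros((8,), np.int32)
images_shard1 = np.zeros((8, 28, 28), np.float32)
labels_shard1 = np.zeros((8,), np.int32)

# Outer list: one entry per shard. Each inner list: the tuple of values
# whose types and shapes configure the corresponding shard.
input_tensors = [
    [images_shard0, labels_shard0],  # shard 0
    [images_shard1, labels_shard1],  # shard 1
]
```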

@@ -73,7 +73,7 @@ bool ConsumeEquals(StringPiece* description) {
return false;
}
-// Split `*orig` into two pieces at the first occurence of `split_ch`.
+// Split `*orig` into two pieces at the first occurrence of `split_ch`.
// Returns whether `split_ch` was found. Afterwards, `*before_split`
// contains the maximum prefix of the input `*orig` that doesn't
// contain `split_ch`, and `*orig` contains everything after the
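In Python terms, the contract this comment describes is close to `str.partition` (a loose analogue for illustration, not the C++ implementation):

```python
def split_at_first(orig, split_ch):
    # before is the maximal prefix without split_ch; after is everything
    # past the first split_ch; sep is empty when split_ch was not found.
    before, sep, after = orig.partition(split_ch)
    return bool(sep), before, after

print(split_at_first("key=value=x", "="))  # (True, 'key', 'value=x')
print(split_at_first("novalue", "="))      # (False, 'novalue', '')
```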

@@ -66,7 +66,7 @@ target 'YourProjectName'
```
- Then you run ```pod install``` to download and install the
-TensorFlow-experimental pod, and finaly perform
+TensorFlow-experimental pod, and finally perform
```open YourProjectName.xcworkspace``` and add your code.
- In your apps "Build Settings", make sure to add $(inherited) to sections

@@ -180,7 +180,7 @@ def assert_same_structure(nest1, nest2, check_types=True):
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences are checked as
-well, incuding the keys of dictionaries. If set to `False`, for example
+well, including the keys of dictionaries. If set to `False`, for example
a list and a tuple of objects will look the same if they have the same
size.
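A quick illustration of the check_types behavior described above; the import path is the one this function lived under at the time (treat it as an assumption):

```python
from tensorflow.python.util import nest

# With check_types=False, a list and a tuple of the same size match.
nest.assert_same_structure([1, 2], (1, 2), check_types=False)  # passes

# With the default check_types=True, differing sequence types raise.
try:
    nest.assert_same_structure([1, 2], (1, 2))
except TypeError as e:
    print("mismatched types:", e)
```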

@@ -54,7 +54,7 @@ class _Errors(object):
"""Add an error to the collection.
Args:
-full_name: The path to the file in which the error occured.
+full_name: The path to the file in which the error occurred.
message: The message to display with the error.
"""
self._errors.append((full_name, message))