fix: typos using misspell

fix: typos

This PR is part of a campaign to fix a lot of typos on GitHub!
You can see the progress on https://github.com/fixTypos/fix_typos/

https://github.com/client9/misspell
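
For reference, misspell can rewrite files in place via its -w flag. A minimal sketch of such a pass over the Python/C++ sources follows; the exact command used for this PR is not recorded here, and the file filter is only an assumption:

find tensorflow -type f \( -name '*.py' -o -name '*.h' -o -name '*.cc' \) -print0 | xargs -0 misspell -w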
Andreas Solleder 2017-05-20 06:26:40 +02:00
parent f2a46993e3
commit 1f6ed06221
172 changed files with 225 additions and 225 deletions
configure
tensorflow
compiler/xla
contrib
bayesflow/python/ops
boosted_trees/lib/testutil
cloud
cudnn_rnn/python/ops
data/python/framework
distributions/python
graph_editor
hooks
image
keras/python/keras
kernel_methods/python/mappers
labeled_tensor/python/ops
layers/python/layers
learn/python/learn
legacy_seq2seq/python/kernel_tests
memory_stats/python/kernel_tests
metrics/python/ops
rnn/python/ops
seq2seq/python/ops
slim
tensorboard/plugins/projector
training/python/training
core

configure

@ -230,7 +230,7 @@ if [ "$TF_NEED_MKL" == "1" ]; then # TF_NEED_MKL
if [ -z "$MKL_INSTALL_PATH" ]; then
MKL_INSTALL_PATH=$default_mkl_path
fi
# Result returned from "read" will be used unexpanded. That make "~" unuseable.
# Result returned from "read" will be used unexpanded. That make "~" unusable.
# Going through one more level of expansion to handle that.
MKL_INSTALL_PATH=`${PYTHON_BIN_PATH} -c "import os; print(os.path.realpath(os.path.expanduser('${MKL_INSTALL_PATH}')))"`
fi
@ -565,7 +565,7 @@ while true; do
if [ -z "$CUDNN_INSTALL_PATH" ]; then
CUDNN_INSTALL_PATH=$default_cudnn_path
fi
# Result returned from "read" will be used unexpanded. That make "~" unuseable.
# Result returned from "read" will be used unexpanded. That make "~" unusable.
# Going through one more level of expansion to handle that.
CUDNN_INSTALL_PATH=`"${PYTHON_BIN_PATH}" -c "import os; print(os.path.realpath(os.path.expanduser('${CUDNN_INSTALL_PATH}')))"`
fi


@ -65,7 +65,7 @@ class Array4D {
Fill(T());
}
// Creates a 4D array, initalized to value.
// Creates a 4D array, initialized to value.
Array4D(int64 planes, int64 depth, int64 height, int64 width, T value)
: Array4D(planes, depth, height, width) {
Fill(value);


@ -56,7 +56,7 @@ class ExecutableBuildOptions {
// If set, this specifies the layout of the result of the computation. If not
// set, the service will chose the layout of the result. A Shape is used to
// store the layout to accomodate tuple result shapes. A value of nullptr
// store the layout to accommodate tuple result shapes. A value of nullptr
// indicates the option has not been set.
ExecutableBuildOptions& set_result_layout(const Shape& shape_with_layout);
const Shape* result_layout() const;


@ -128,7 +128,7 @@ class LiteralUtil {
// Creates a new value that has the equivalent value as literal, but conforms
// to new_layout; e.g. a literal matrix that was in {0, 1} minor-to-major
// dimension layout can be re-layed-out as {1, 0} minor-to-major dimension
// dimension layout can be re-laid-out as {1, 0} minor-to-major dimension
// layout and the value in the cell at any given logical index (i0, i1) will
// be the same.
//


@ -628,7 +628,7 @@ class FusedDynamicUpdateSliceLivenessTest : public BufferLivenessTest {
BufferLiveness::Run(module.get(),
MakeUnique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
// Return whether or not buffers interfernce is detected between
// Return whether or not buffers interference is detected between
// 'tuple_param0' and 'tuple_root' at shape index '{1}'.
return TupleElementsMayInterfere(*liveness, tuple_param0, tuple_root, {1});
}
@ -740,7 +740,7 @@ class DynamicUpdateSliceLivenessTest : public BufferLivenessTest {
BufferLiveness::Run(module.get(),
MakeUnique<DependencyHloOrdering>(module.get()))
.ConsumeValueOrDie();
// Return whether or not buffers interfernce is detected between
// Return whether or not buffers interference is detected between
// 'tuple_param0' and 'tuple_root' at shape index '{1}'.
return TupleElementsMayInterfere(*liveness, tuple_param0, tuple_root, {1});
}


@ -141,7 +141,7 @@ class InstructionCopier {
Status RecordAmbiguousOrNonDistinctIndices(
const TuplePointsToAnalysis& points_to_analysis);
// Records instruction buffer indices which have interferring live ranges
// Records instruction buffer indices which have interfering live ranges
// with 'other_instruction' buffers at same index.
Status RecordIndicesWhichInterfereWithOtherInstruction(
const BufferLiveness& liveness, const HloInstruction* other_instruction,
@ -431,7 +431,7 @@ HloInstruction* InstructionCopier::Copy() {
return copy;
}
// The 'read_only_indices' are initalized based on points-to analysis on the
// The 'read_only_indices' are initialized based on points-to analysis on the
// while body corresponding to 'while_hlo'. If the init buffer corresponding to
// a read-only index aliases with an entry parameter (or constant), it cannot be
// considered read-only, and must be copied. This is necessary because some


@ -972,7 +972,7 @@ TEST_F(WhileCopyInsertionTest, InitPointsToNonDistinct) {
op::Copy(old_init->operand(1)->operand(0)))));
}
// Tests while init instruction buffer which interfers with while result buffer.
// Tests while init instruction buffer which interferes with while result buffer.
//
// init_data = Broadcast(...)
// add_unrelated = Add(init_data) // takes a reference to cause interference


@ -125,7 +125,7 @@ tensorflow::Status ConvolutionThunk::ExecuteOnStream(
CHECK_LE(num_dimensions, 3);
// cuDNN does not support 1D convolutions. We therefore express 1D
// convolutions as 2D convolutions where the first spatial dimension is 1.
// This matches the behaviour of TF (see definition of conv1d in
// This matches the behavior of TF (see definition of conv1d in
// tensorflow/python/ops/nn_ops.py).
const int effective_num_dimensions = std::max(2, num_dimensions);


@ -405,9 +405,9 @@ StatusOr<string> CompileModuleToPtx(llvm::Module* module,
AddOptimizationPasses(flags->opt_level, /*size_level=*/0,
target_machine.get(), &module_passes, &function_passes);
// Loop unrolling exposes more opportunites for SROA. Therefore, we run SROA
// Loop unrolling exposes more opportunities for SROA. Therefore, we run SROA
// again after the standard optimization passes [http://b/13329423].
// TODO(jingyue): SROA may further expose more optimization opportunites, such
// TODO(jingyue): SROA may further expose more optimization opportunities, such
// as more precise alias analysis and more function inlining (SROA may change
// the inlining cost of a function). For now, running SROA already emits good
// enough code for the evaluated benchmarks. We may want to run more


@ -33,7 +33,7 @@ namespace gpu {
enum class PartitionStrategy {
// Optimized for latency by allowing maximum number of registers per thread.
kLatency,
// Optimized for throughtput. This may limit registers per thread and cause
// Optimized for throughput. This may limit registers per thread and cause
// longer latency.
kThroughput
};


@ -37,7 +37,7 @@ namespace {
// patterns to match.
//
// Each ExprTree node is comprised of an HloOpcode, and a set of operands (each
// of type ExprTree). Operands can be added by specifing the index and HloOpcode
// of type ExprTree). Operands can be added by specifying the index and HloOpcode
// of the operand.
//
// For example, the following computation:


@ -21,7 +21,7 @@ limitations under the License.
namespace xla {
// A pass which performs constant folding in order to avoid unecessary
// A pass which performs constant folding in order to avoid unnecessary
// computation on constants.
class HloConstantFolding : public HloPassInterface {
public:


@ -133,7 +133,7 @@ class HloCostAnalysis : public DfsHloVisitor {
int64 bytes_accessed() const { return bytes_accessed_; }
private:
// An FMA counts as two floating point operations in these analyses.
// An FMA counts as two floating point operations in these analyzes.
static constexpr int64 kFmaFlops = 2;
// Utility function to handle all element-wise operations.


@ -104,7 +104,7 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
std::hash<int>>
typed_visitors_;
// Tracks the HLO instruciton and its evaluated literal result.
// Tracks the HLO instruction and its evaluated literal result.
// TODO(b/35950897): have better memory management here to free instructions
// that are no longer a parent for any other subsequent instruction in
// post-orderring.


@ -343,7 +343,7 @@ class ListScheduler {
return freed_bytes;
}
// Construct the scheduling priority of the given instruciton.
// Construct the scheduling priority of the given instruction.
Priority GetPriority(const HloInstruction* instruction) {
return {BytesFreedIfScheduled(instruction), instruction->user_count()};
}


@ -273,7 +273,7 @@ class LayoutAssignment : public HloPassInterface {
return Status::OK();
}
// This method can be overriden to mark instructions as requiring the operands
// This method can be overridden to mark instructions as requiring the operands
// to have the same layout as the result, for performance or correctness. This
// will propagate constraints through the instruction from the result into the
// operands.


@ -130,7 +130,7 @@ llvm::AllocaInst* EmitAllocaAtFunctionEntryWithCount(
llvm::Type* type, llvm::Value* element_count, tensorflow::StringPiece name,
llvm::IRBuilder<>* ir_builder, int alignment = 0);
// Creates a basic block with the same context and funtion as for the
// Creates a basic block with the same context and function as for the
// builder. Inserts at the end of the function if insert_before is
// null.
llvm::BasicBlock* CreateBasicBlock(llvm::BasicBlock* insert_before,


@ -65,7 +65,7 @@ class HloTestBase : public ::testing::Test {
perftools::gputools::DeviceMemoryBase TransferToDevice(
const Literal& literal);
// Transfers the array refered to by the given handle from the device and
// Transfers the array referred to by the given handle from the device and
// returns as a Literal.
std::unique_ptr<Literal> TransferFromDevice(
const Shape& shape, perftools::gputools::DeviceMemoryBase device_base);


@ -194,7 +194,7 @@ XLA_TEST_F(PrngTest, MapUsingRng) {
}
}
// This tests demonstrates the global seeding behaviour.
// This tests demonstrates the global seeding behavior.
// * If a seed is passed in via Execute (ExecuteAndTransfer) then the output is
// fixed (i.e., there is a single output for a given seed);
// * If no seed is passed in then the output of every call can be different;


@ -177,7 +177,7 @@ def _logspace_mean(log_values):
`Log[Mean[values]]`.
"""
# center = Max[Log[values]], with stop-gradient
# The center hopefully keep the exponentiated term small. It is cancelled
# The center hopefully keep the exponentiated term small. It is canceled
# from the final result, so putting stop gradient on it will not change the
# final result. We put stop gradient on to eliminate unnecessary computation.
center = array_ops.stop_gradient(_sample_max(log_values))


@ -42,7 +42,7 @@ class RandomTreeGen {
boosted_trees::trees::DecisionTreeConfig Generate(
const boosted_trees::trees::DecisionTreeConfig& tree);
// Requried: depth >= 1; tree_count >= 1.
// Required: depth >= 1; tree_count >= 1.
boosted_trees::trees::DecisionTreeEnsembleConfig GenerateEnsemble(
int dept, int tree_count);


@ -46,7 +46,7 @@ Status GetTableAttrs(OpKernelConstruction* context, string* project_id,
} // namespace
// Note that overriden methods with names ending in "Locked" are called by
// Note that overridden methods with names ending in "Locked" are called by
// ReaderBase while a mutex is held.
// See comments for ReaderBase.
class BigQueryReader : public ReaderBase {


@ -46,7 +46,7 @@ _TABLE = "test-table"
# The values for rows are generated such that some columns have null values. The
# general formula here is:
# - The int64 column is present in every row.
# - The string column is only avaiable in even rows.
# - The string column is only available in even rows.
# - The float column is only available in every third row.
_ROWS = [[0, "s_0", 0.1], [1, None, None], [2, "s_2", None], [3, None, 3.1],
[4, "s_4", None], [5, None, None], [6, "s_6", 6.1], [7, None, None],


@ -141,7 +141,7 @@ _cudnn_rnn_common_doc_string = """
* Once a while, the user saves the parameter buffer into model checkpoints
with Saver.save().
* When restoring, the user creates a RNNParamsSaveable object and uses
Saver.restore() to restore the paramter buffer from the canonical format
Saver.restore() to restore the parameter buffer from the canonical format
to a user-defined format, as well as to restore other savable objects
in the checkpoint file.
"""


@ -39,7 +39,7 @@ class _ExperimentalFuncGraph(function._FuncGraph):
_ExperimentalFuncGraph overrides ops.Graph's create_op() so that we can keep
track of every inputs into every op created inside the function. If
any input is from other graphs, we keep track of it in self.capture
and substitue the input with a place holder.
and substitute the input with a place holder.
Each captured input's corresponding place holder is converted into a
function argument and the caller passes in the captured tensor.


@ -38,7 +38,7 @@ class _FakeVectorStudentT(object):
Other `Vector*` implementations need only test new code. That we don't need
to test every Vector* distribution is good because there aren't SciPy
analogues and reimplementing everything in NumPy sort of defeats the point of
analogs and reimplementing everything in NumPy sort of defeats the point of
having the `TransformedDistribution + Affine` API.
"""


@ -269,7 +269,7 @@ class Binomial(distribution.Distribution):
message="total_count must be non-negative."),
distribution_util.assert_integer_form(
total_count,
message="total_count cannot contain fractional componentes."),
message="total_count cannot contain fractional components."),
], total_count)
def _maybe_assert_valid_sample(self, counts, check_integer=True):


@ -130,7 +130,7 @@ def transform_tree(tree, fn, iterable_type=tuple):
tree: iterable or not. If iterable, its elements (child) can also be
iterable or not.
fn: function to apply to each leaves.
iterable_type: type use to construct the resulting tree for unknwon
iterable_type: type use to construct the resulting tree for unknown
iterable, typically `list` or `tuple`.
Returns:
A tree whose leaves has been transformed by `fn`.


@ -5,7 +5,7 @@ of `SessionRunHook` and are to be used with helpers like `MonitoredSession`
and `learn.Estimator` that wrap `tensorflow.Session`.
The hooks are called between invocations of `Session.run()` to perform custom
behaviour.
behavior.
For example the `ProfilerHook` periodically collects `RunMetadata` after
`Session.run()` and saves profiling information that can be viewed in a


@ -77,7 +77,7 @@ REGISTER_OP("BipartiteMatch")
.Doc(R"doc(
Find bipartite matching based on a given distance matrix.
A greedy bi-partite matching alogrithm is used to obtain the matching with the
A greedy bi-partite matching algorithm is used to obtain the matching with the
(greedy) minimum distance.
distance_mat: A 2-D float tensor of shape `[num_rows, num_columns]`. It is a


@ -266,7 +266,7 @@ def bipartite_match(
top_k=-1):
"""Find bipartite matching based on a given distance matrix.
A greedy bi-partite matching alogrithm is used to obtain the matching with
A greedy bi-partite matching algorithm is used to obtain the matching with
the (greedy) minimum distance.
Args:


@ -59,7 +59,7 @@ def identity_block(input_tensor, kernel_size, filters, stage, block):
Arguments:
input_tensor: input tensor
kernel_size: defualt 3, the kernel size of middle conv layer at main path
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filterss of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
@ -98,7 +98,7 @@ def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2,
Arguments:
input_tensor: input tensor
kernel_size: defualt 3, the kernel size of middle conv layer at main path
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the filterss of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names


@ -91,7 +91,7 @@ _IMAGE_DATA_FORMAT = 'channels_last'
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibily with multi-backend Keras.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
@ -2617,7 +2617,7 @@ def in_train_phase(x, alt, training=None):
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifing the learning phase.
specifying the learning phase.
Returns:
Either `x` or `alt` based on the `training` flag.
@ -2660,7 +2660,7 @@ def in_test_phase(x, alt, training=None):
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifing the learning phase.
specifying the learning phase.
Returns:
Either `x` or `alt` based on `K.learning_phase`.


@ -1546,7 +1546,7 @@ class Container(Layer):
"""Retrieve the model's updates.
Will only include updates that are either
inconditional, or conditional on inputs to this model
unconditional, or conditional on inputs to this model
(e.g. will not include updates that depend on tensors
that aren't inputs to this model).
@ -1573,7 +1573,7 @@ class Container(Layer):
"""Retrieve the model's losses.
Will only include losses that are either
inconditional, or conditional on inputs to this model
unconditional, or conditional on inputs to this model
(e.g. will not include losses that depend on tensors
that aren't inputs to this model).


@ -109,7 +109,7 @@ class BaseWrapper(object):
"""Gets parameters for this estimator.
Arguments:
**params: ignored (exists for API compatiblity).
**params: ignored (exists for API compatibility).
Returns:
Dictionary of parameter names mapped to their values.


@ -85,7 +85,7 @@ class RandomFourierFeatureMapperTest(TensorFlowTestCase):
mapped_x = rffm.map(x)
mapped_x_copy = rffm.map(x)
# Two different evaluations of tensors output by map on the same input
# are identical because the same paramaters are used for the mappings.
# are identical because the same parameters are used for the mappings.
self.assertAllClose(mapped_x.eval(), mapped_x_copy.eval(), atol=0.001)
def testTwoMapperObjects(self):


@ -618,7 +618,7 @@ def identity(labeled_tensor, name=None):
def slice_function(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
This is an analogue of tf.slice.
This is an analog of tf.slice.
For example:
>>> tensor = tf.reshape(tf.range(0, 6), [3, 2])
>>> labeled_tensor = lt.LabeledTensor(tensor, ['a', ('b', ['foo', 'bar'])])


@ -28,7 +28,7 @@ from tensorflow.python.platform import test
class RegressionTargetColumnTest(test.TestCase):
# TODO(zakaria): test multilabel regresssion.
# TODO(zakaria): test multilabel regression.
def testRegression(self):
target_column = target_column_lib.regression_target()
with ops.Graph().as_default(), session.Session() as sess:


@ -97,7 +97,7 @@ class TensorFlowDataFrame(df.DataFrame):
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
after producting `n` batches.
after producing `n` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.


@ -89,7 +89,7 @@ SCIKIT_DECOUPLE_INSTRUCTIONS = (
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
@ -358,7 +358,7 @@ class BaseEstimator(
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overriden with
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
@ -703,7 +703,7 @@ class BaseEstimator(
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overriden by sub-classes that require custom support.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
@ -1149,7 +1149,7 @@ class Estimator(BaseEstimator):
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overriden by sub-classes that require custom support.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
@ -1165,7 +1165,7 @@ class Estimator(BaseEstimator):
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overriden by sub-classes that require custom support.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
@ -1204,7 +1204,7 @@ class Estimator(BaseEstimator):
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overriden by sub-classes that require custom support.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.


@ -620,7 +620,7 @@ def _create_model_fn_ops(features,
weight_tensor = _weight_tensor(features, weight_column_name)
loss, weighted_average_loss = loss_fn(labels, logits, weight_tensor)
# Uses the deprecated API to set the tag explicitly.
# Without it, trianing and eval losses will show up in different graphs.
# Without it, training and eval losses will show up in different graphs.
logging_ops.scalar_summary(
_summary_key(head_name, mkey.LOSS), weighted_average_loss)
@ -1141,7 +1141,7 @@ def _to_labels_tensor(labels, label_name):
"""Returns label as a tensor.
Args:
labels: Label `Tensor` or `SparseTensor` or a dict containig labels.
labels: Label `Tensor` or `SparseTensor` or a dict containing labels.
label_name: Label name if labels is a dict.
Returns:
@ -1575,7 +1575,7 @@ class _MultiHead(Head):
Args:
all_model_fn_ops: list of ModelFnOps for the individual heads.
train_op_fn: Function to create train op. See `create_model_fn_ops`
documentaion for more details.
documentation for more details.
Returns:
ModelFnOps that merges all heads for TRAIN.


@ -119,7 +119,7 @@ def apply_dropout(cells, dropout_keep_probabilities, random_seed=None):
"""
if len(dropout_keep_probabilities) != len(cells) + 1:
raise ValueError(
'The number of dropout probabilites must be one greater than the '
'The number of dropout probabilities must be one greater than the '
'number of cells. Got {} cells and {} dropout probabilities.'.format(
len(cells), len(dropout_keep_probabilities)))
wrapped_cells = [


@ -309,7 +309,7 @@ class RunConfig(ClusterConfig, core_run_config.RunConfig):
Args:
whitelist: A list of the string names of the properties uid should not
include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
includes most properites user allowes to change.
includes most properties user allowes to change.
Returns:
A uid string.


@ -53,7 +53,7 @@ class Experiment(object):
"""
# TODO(ispir): remove delay_workers_by_global_step and make global step based
# waiting as only behaviour.
# waiting as only behavior.
@deprecated_args(
"2016-10-23",
"local_eval_frequency is deprecated as local_run will be renamed to "
@ -550,7 +550,7 @@ class Experiment(object):
eval_result = None
# Set the default value for train_steps_per_iteration, which will be
# overriden by other settings.
# overridden by other settings.
train_steps_per_iteration = 1000
if self._train_steps_per_iteration is not None:
train_steps_per_iteration = self._train_steps_per_iteration


@ -155,7 +155,7 @@ def run(experiment_fn, output_dir=None, schedule=None, run_config=None,
to create the `Estimator` (passed as `model_dir` to its constructor). It
must return an `Experiment`. For this case, `run_config` and `hparams`
must be None.
2) It accpets two arguments `run_config` and `hparams`, which should be
2) It accepts two arguments `run_config` and `hparams`, which should be
used to create the `Estimator` (`run_config` passed as `config` to its
constructor; `hparams` used as the hyper-paremeters of the model).
It must return an `Experiment`. For this case, `output_dir` must be None.


@ -140,7 +140,7 @@ def rnn_seq2seq(encoder_inputs,
scope: Scope to use, if None new will be produced.
Returns:
List of tensors for outputs and states for trianing and sampling sub-graphs.
List of tensors for outputs and states for training and sampling sub-graphs.
"""
with vs.variable_scope(scope or "rnn_seq2seq"):
_, last_enc_state = rnn.static_rnn(


@ -128,9 +128,9 @@ class CategoricalVocabulary(object):
Class name.
Raises:
ValueError: if this vocabulary wasn't initalized with support_reverse.
ValueError: if this vocabulary wasn't initialized with support_reverse.
"""
if not self._support_reverse:
raise ValueError("This vocabulary wasn't initalized with "
raise ValueError("This vocabulary wasn't initialized with "
"support_reverse to support reverse() function.")
return self._reverse_mapping[class_id]


@ -49,7 +49,7 @@ class Trainable(object):
steps: Number of steps for which to train model. If `None`, train forever.
'steps' works incrementally. If you call two times fit(steps=10) then
training occurs in total 20 steps. If you don't want to have incremental
behaviour please set `max_steps` instead. If set, `max_steps` must be
behavior please set `max_steps` instead. If set, `max_steps` must be
`None`.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.


@ -89,7 +89,7 @@ def _export_graph(graph, saver, checkpoint_path, export_dir,
def generic_signature_fn(examples, unused_features, predictions):
"""Creates generic signature from given examples and predictions.
This is needed for backward compatibility with default behaviour of
This is needed for backward compatibility with default behavior of
export_estimator.
Args:


@ -309,7 +309,7 @@ def get_most_recent_export(export_dir_base):
directories.
Returns:
A gc.Path, whith is just a namedtuple of (path, export_version).
A gc.Path, with is just a namedtuple of (path, export_version).
"""
select_filter = gc.largest_export_versions(1)
results = select_filter(gc.get_paths(export_dir_base,


@ -109,7 +109,7 @@ class SavedModelExportUtilsTest(test.TestCase):
self.assertEqual(actual_signature_def, expected_signature_def)
def test_build_standardized_signature_def_classification2(self):
"""Tests multiple output tensors that include classes and probabilites."""
"""Tests multiple output tensors that include classes and probabilities."""
input_tensors = {
"input-1":
array_ops.placeholder(


@ -837,7 +837,7 @@ class Seq2SeqTest(test.TestCase):
# with variable_scope.variable_scope("new"):
# _, losses2 = SampleGRUSeq2Seq
# inp, out, weights, per_example_loss=True)
# # First loss is scalar, the second one is a 1-dimensinal tensor.
# # First loss is scalar, the second one is a 1-dimensional tensor.
# self.assertEqual([], losses1[0].get_shape().as_list())
# self.assertEqual([None], losses2[0].get_shape().as_list())


@ -49,7 +49,7 @@ class MemoryStatsOpsTest(test_util.TensorFlowTestCase):
# The memory for matrix "a" can be reused for matrix "d". Therefore, this
# computation needs space for only three matrix plus some small overhead.
def testChainOfMatmul(self):
# MaxBytesInUse is registerd on GPU only. See kernels/memory_stats_ops.cc.
# MaxBytesInUse is registered on GPU only. See kernels/memory_stats_ops.cc.
if not test.is_gpu_available():
return


@ -1507,7 +1507,7 @@ class StreamingAUCTest(test.TestCase):
self.assertAlmostEqual(1, auc.eval(), 6)
def np_auc(self, predictions, labels, weights):
"""Computes the AUC explicitely using Numpy.
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].


@ -466,7 +466,7 @@ class GridLSTMCell(core_rnn_cell.RNNCell):
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, defualt 1, How to split the weight
num_unit_shards: (optional) int, default 1, How to split the weight
matrix. If > 1,the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
@ -1809,12 +1809,12 @@ class PhasedLSTMCell(core_rnn_cell.RNNCell):
period during which the gates are open.
trainable_ratio_on: bool, weather ratio_on is trainable.
period_init_min: float or scalar float Tensor. With value > 0.
Minimum value of the initalized period.
Minimum value of the initialized period.
The period values are initialized by drawing from the distribution:
e^U(log(period_init_min), log(period_init_max))
Where U(.,.) is the uniform distribution.
period_init_max: float or scalar float Tensor.
With value > period_init_min. Maximum value of the initalized period.
With value > period_init_min. Maximum value of the initialized period.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.


@ -474,7 +474,7 @@ class AttentionWrapperState(
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overriden properties as provided in `kwargs`.
this one, except any overridden properties as provided in `kwargs`.
"""
return super(AttentionWrapperState, self)._replace(**kwargs)


@ -352,7 +352,7 @@ we can both ensure that each layer uses the same values and simplify the code:
```
As the example illustrates, the use of arg_scope makes the code cleaner,
simpler and easier to maintain. Notice that while argument values are specifed
simpler and easier to maintain. Notice that while argument values are specified
in the arg_scope, they can be overwritten locally. In particular, while
the padding argument has been set to 'SAME', the second convolution overrides
it with the value of 'VALID'.


@ -33,7 +33,7 @@ To read data using multiple readers simultaneous with shuffling:
shuffle=True)
images, labels = pascal_voc_data_provider.get(['images', 'labels'])
Equivalently, one may request different fields of the same sample seperately:
Equivalently, one may request different fields of the same sample separately:
[images] = pascal_voc_data_provider.get(['images'])
[labels] = pascal_voc_data_provider.get(['labels'])


@ -40,7 +40,7 @@ def visualize_embeddings(summary_writer, config):
"""Stores a config file used by the embedding projector.
Args:
summary_writer: The summary writer used for writting events.
summary_writer: The summary writer used for writing events.
config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`
proto that holds the configuration for the projector such as paths to
checkpoint files and metadata files for the embeddings. If


@ -46,7 +46,7 @@ class ProjectorApiTest(test.TestCase):
writer = writer_lib.FileWriter(temp_dir)
projector.visualize_embeddings(writer, config)
# Read the configuratin from disk and make sure it matches the original.
# Read the configurations from disk and make sure it matches the original.
with gfile.GFile(os.path.join(temp_dir, 'projector_config.pbtxt')) as f:
config2 = projector_config_pb2.ProjectorConfig()
text_format.Parse(f.read(), config2)


@ -370,7 +370,7 @@ def evaluate_repeatedly(checkpoint_dir,
One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
summaries run immedietly after the model checkpoint has been restored.
summaries run immediately after the model checkpoint has been restored.
Note that `evaluate_once` creates a local variable used to track the number of
evaluations run via `tf.contrib.training.get_or_create_eval_step`.


@ -422,7 +422,7 @@ class HParams(object):
elif issubclass(param_type, float):
typename = 'float'
else:
raise ValueError('Unsupported paramter type: %s' % str(param_type))
raise ValueError('Unsupported parameter type: %s' % str(param_type))
suffix = 'list' if is_list else 'value'
return '_'.join([typename, suffix])


@ -344,7 +344,7 @@ def _prepare_sequence_inputs(inputs, states):
key = _check_rank(inputs.key, 0)
if length.dtype != dtypes.int32:
raise TypeError("length dtype must be int32, but recieved: %s" %
raise TypeError("length dtype must be int32, but received: %s" %
length.dtype)
if key.dtype != dtypes.string:
raise TypeError("key dtype must be string, but received: %s" % key.dtype)
@ -1673,7 +1673,7 @@ def _move_sparse_tensor_out_context(input_context, input_sequences, num_unroll):
shape = array_ops.concat(
[array_ops.expand_dims(value_length, 0), sp_tensor.dense_shape], 0)
# Construct new indices by mutliplying old ones and prepending [0, n).
# Construct new indices by multiplying old ones and prepending [0, n).
# First multiply indices n times along a newly created 0-dimension.
multiplied_indices = array_ops.tile(
array_ops.expand_dims(sp_tensor.indices, 0),


@ -83,7 +83,7 @@ bool IsConstantFoldable(const Node* n,
}
// Returns the constant foldable nodes in `nodes` in topological order.
// Populates `constant_control_deps` with the non-constant control depedencies
// Populates `constant_control_deps` with the non-constant control dependencies
// of each constant node.
void FindConstantFoldableNodes(
const Graph* graph, ConstantFoldingOptions opts, std::vector<Node*>* nodes,


@ -74,8 +74,8 @@ class Executor {
//
// RunAsync() uses "cancellation_manager", if not nullptr, to
// register callbacks that should be called if the graph computation
// is cancelled. Note that the callbacks merely unblock any
// long-running computation, and a cancelled step will terminate by
// is canceled. Note that the callbacks merely unblock any
// long-running computation, and a canceled step will terminate by
// returning/calling the DoneCallback as usual.
//
// RunAsync() dispatches closures to "runner". Typically, "runner"


@ -47,7 +47,7 @@ class SessionFactory {
// Old sessions may continue to have side-effects on resources not in
// containers listed in "containers", and thus may affect future
// sessions' results in ways that are hard to predict. Thus, if well-defined
// behaviour is desired, is it recommended that all containers be listed in
// behavior is desired, is it recommended that all containers be listed in
// "containers".
//
// If the "containers" vector is empty, the default container is assumed.


@ -243,7 +243,7 @@ Status SimpleGraphExecutionState::InitBaseGraph(
session_options_->config.graph_options().rewrite_options();
if (grappler::MetaOptimizerEnabled(rewrite_options)) {
// Adding this functionalty in steps. The first step is to make sure
// Adding this functionality in steps. The first step is to make sure
// we don't break dependencies. The second step will be to turn the
// functionality on by default.
grappler::GrapplerItem item;


@ -660,7 +660,7 @@ Status SimplePlacer::Run() {
if (!edge->IsControlEdge() &&
(IsRefType(node->input_type(edge->dst_input())) ||
node->input_type(edge->dst_input()) == DT_RESOURCE)) {
// If both the source node and this node have paritally
// If both the source node and this node have partially
// specified a device, then 'node's device should be
// cleared: the reference edge forces 'node' to be on the
// same device as the source node.


@ -20,7 +20,7 @@ package tensorflow;
import "tensorflow/core/util/event.proto";
// Reply message from EventListener to the client, i.e., to the source of the
// Event protocal buffers, e.g., debug ops inserted by a debugged runtime to a
// Event protocol buffers, e.g., debug ops inserted by a debugged runtime to a
// TensorFlow graph being executed.
message EventReply {
message DebugOpStateChange {


@ -108,9 +108,9 @@ class GraphMgr {
};
struct Item : public core::RefCounted {
// TOOD(zhifengc): Keeps a copy of the original graph if the need arises.
// TOOD(zhifengc): Stats, updated by multiple runs potentially.
// TOOD(zhifengc): Dup-detection. Ensure step_id only run once.
// TODO(zhifengc): Keeps a copy of the original graph if the need arises.
// TODO(zhifengc): Stats, updated by multiple runs potentially.
// TODO(zhifengc): Dup-detection. Ensure step_id only run once.
~Item() override;
// Session handle.
@ -126,7 +126,7 @@ class GraphMgr {
// has a root executor which may call into the runtime library.
std::vector<ExecutionUnit> units;
// Used to deresgister a cost model when cost model is requried in graph
// Used to deresgister a cost model when cost model is required in graph
// manager.
GraphMgr* graph_mgr;
};
@ -157,7 +157,7 @@ class GraphMgr {
CancellationManager* cancellation_manager,
StatusCallback done);
// Don't attempt to process cost models unless explicitely requested for at
// Don't attempt to process cost models unless explicitly requested for at
// least one of the items.
bool skip_cost_models_ = true;


@ -25,7 +25,7 @@ limitations under the License.
// A Master discovers remote devices on-demand and keeps track of
// statistics of those remote devices.
//
// Each session analyses the graph, places nodes across available
// Each session analyzes the graph, places nodes across available
// devices, and ultimately drives the graph computation by initiating
// RunGraph on the workers.


@ -89,7 +89,7 @@ class UntypedCall : public core::RefCounted {
virtual void RequestReceived(Service* service, bool ok) = 0;
// This method will be called either (i) when the server is notified
// that the request has been cancelled, or (ii) when the request completes
// that the request has been canceled, or (ii) when the request completes
// normally. The implementation should distinguish these cases by querying
// the `grpc::ServerContext` associated with the request.
virtual void RequestCancelled(Service* service, bool ok) = 0;
@ -175,7 +175,7 @@ class Call : public UntypedCall<Service> {
}
// Registers `callback` as the function that should be called if and when this
// call is cancelled by the client.
// call is canceled by the client.
void SetCancelCallback(std::function<void()> callback) {
mutex_lock l(mu_);
cancel_callback_ = std::move(callback);


@ -25,7 +25,7 @@ limitations under the License.
// A GrpcMasterService discovers remote devices in the background and
// keeps track of statistics of those remote devices.
//
// Each session analyses the graph, places nodes across available
// Each session analyzes the graph, places nodes across available
// devices, and ultimately drives the graph computation by initiating
// RunGraph on workers.
#include "tensorflow/core/distributed_runtime/rpc/grpc_master_service.h"


@ -517,7 +517,7 @@ TEST(GrpcSessionTest, Error) {
//
// Subgraph for "b" sleeps at the node "b_delay". When the sleep
// finishes, the subgraph "b" will continue execution till it
// notices that it is cancelled. Meanwhile, subgraph's executor
// notices that it is canceled. Meanwhile, subgraph's executor
// and its related state (registered ops) should still be alive.
auto b = test::graph::Constant(&g, Tensor());
b->set_assigned_device_name(dev_b);


@ -35,7 +35,7 @@ void WorkerCacheLogger::SetLogging(bool v) {
++want_logging_count_;
} else {
--want_logging_count_;
// If RPCs get cancelled, it may be possible for the count
// If RPCs get canceled, it may be possible for the count
// to go negative. This should not be a fatal error, since
// logging is non-critical.
if (want_logging_count_ < 0) want_logging_count_ = 0;


@ -36,7 +36,7 @@ namespace tensorflow {
// CancellationManager::get_cancellation_token.
typedef int64 CancellationToken;
// A callback that is invoked when a step is cancelled.
// A callback that is invoked when a step is canceled.
//
// NOTE(mrry): See caveats about CancelCallback implementations in the
// comment for CancellationManager::RegisterCallback.


@ -163,7 +163,7 @@ REGISTER_OP("HasDefaultType")
// This verifies that a function using an op before a type attr (with
// a default) is added, still works. This is important for backwards
// compatibilty.
// compatibility.
TEST(TFunc, MissingTypeAttr) {
auto fdef = FDH::Create(
// Name
@ -1021,7 +1021,7 @@ TEST(FunctionLibraryDefinitionTest, AddLibrary) {
EXPECT_EQ(s.error_message(),
"Gradient for function 'XTimesTwo' already exists.");
// No conflicing functions or gradients OK
// No conflicting functions or gradients OK
proto.Clear();
*proto.add_function() = test::function::XTimesFour();
grad.set_function_name(test::function::XTimes16().signature().name());


@ -51,7 +51,7 @@ extern Status ConvertGraphDefToGraph(const GraphConstructorOptions& opts,
// On error, returns non-OK and leaves *g unmodified.
//
// "shape_refiner" can be null. It should be non-null if the caller
// intends to add additonal nodes to the graph after the import. This
// intends to add additional nodes to the graph after the import. This
// allows the caller to validate shapes of those nodes (since
// ShapeRefiner::AddNode must be called in topological order).
//


@ -40,7 +40,7 @@ class AnalyticalCostEstimator : public CostEstimator {
explicit AnalyticalCostEstimator(Cluster* cluster, bool use_static_shapes);
~AnalyticalCostEstimator() override {}
// Initalizes the estimator for the specified grappler item.
// Initializes the estimator for the specified grappler item.
// This implementation always returns OK.
Status Initialize(const GrapplerItem& item) override;


@ -130,7 +130,7 @@ class CostEstimator {
public:
virtual ~CostEstimator() {}
// Initalizes the estimator for the specified grappler item.
// Initializes the estimator for the specified grappler item.
// The estimator shouldn't be used if this function returns any status other
// that OK.
virtual Status Initialize(const GrapplerItem& item) = 0;


@ -50,7 +50,7 @@ class MeasuringCostEstimator : public CostEstimator {
int measurement_threads);
~MeasuringCostEstimator() override {}
// Initalizes the estimator for the specified grappler item.
// Initializes the estimator for the specified grappler item.
// This implementation always returns OK.
Status Initialize(const GrapplerItem& item) override;


@ -36,7 +36,7 @@ class OpLevelCostEstimator {
protected:
// Returns an estimate of device performance (in billions of operations
// executed per second) and memory bandwith (in GigaBytes/second) for the
// executed per second) and memory bandwidth (in GigaBytes/second) for the
// specified device.
virtual std::pair<double, double> GetDeviceInfo(
const DeviceProperties& device) const;


@ -46,7 +46,7 @@ Status ModelPruner::Optimize(Cluster* cluster, const GrapplerItem& item,
if (nodes_to_preserve.find(node.name()) != nodes_to_preserve.end()) {
continue;
}
// Don't remove nodes that are explicitely placed.
// Don't remove nodes that are explicitly placed.
if (!node.device().empty()) {
continue;
}


@ -22,7 +22,7 @@ namespace tensorflow {
namespace grappler {
// Prune a model to make it more efficient:
// * Remove unecessary operations.
// * Remove unnecessary operations.
// * Optimize gradient computations.
class ModelPruner : public GraphOptimizer {
public:


@ -34,7 +34,7 @@ class NodeMap {
NodeDef* GetNode(const string& name);
std::set<NodeDef*> GetOutputs(const string& node_name);
// This method doesn't record the outputs of the added node; the outputs need
// to be explictly added by the AddOutput method.
// to be explicitly added by the AddOutput method.
void AddNode(const string& name, NodeDef* node);
void AddOutput(const string& node, const string& output);
void UpdateOutput(const string& node, const string& old_output,


@ -50,7 +50,7 @@ template <typename From, typename To>
struct scalar_cast_op<std::complex<From>, To> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE To
operator()(const std::complex<From>& a) const {
// Replicate numpy behaviour of returning just the real part
// Replicate numpy behavior of returning just the real part
return static_cast<To>(a.real());
}
};
@ -59,7 +59,7 @@ template <typename From, typename To>
struct scalar_cast_op<From, std::complex<To>> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<To> operator()(
const From& a) const {
// Replicate numpy behaviour of setting the imaginary part to 0
// Replicate numpy behavior of setting the imaginary part to 0
return std::complex<To>(static_cast<To>(a), To(0));
}
};

View File

@ -713,7 +713,7 @@ class FusedResizeConv2DUsingGemmOp : public OpKernel {
const int32 before =
paddings_matrix(d, 0); // Pad before existing elements.
const int32 after =
paddings_matrix(d, 1); // Pad after exisitng elements.
paddings_matrix(d, 1); // Pad after existing elements.
OP_REQUIRES(context, before >= 0 && after >= 0,
errors::InvalidArgument("paddings must be non-negative: ",
before, " ", after));


@ -116,7 +116,7 @@ class CudaSolver {
// Launches a memcpy of solver status data specified by dev_lapack_info from
// device to the host, and asynchronously invokes the given callback when the
// copy is complete. The first Status argument to the callback will be
// Status::OK if all lapack infos retrived are zero, otherwise an error status
// Status::OK if all lapack infos retrieved are zero, otherwise an error status
// is given. The second argument contains a host-side copy of the entire set
// of infos retrieved, and can be used for generating detailed error messages.
Status CopyLapackInfoToHostAsync(


@ -26,7 +26,7 @@ limitations under the License.
namespace tensorflow {
// DeepConv2D is a Conv2D implementation specialzied for deep convolutions (i.e
// DeepConv2D is a Conv2D implementation specialized for deep convolutions (i.e
// large 'in_depth' and 'out_depth' product. See cost models below for details).
//
// DeepConv2D is implemented by computing the following equation:


@ -22,7 +22,7 @@ namespace tensorflow {
class OpKernelContext;
// DeepConv2D is a Conv2D implementation specialzied for deep (i.e. large
// DeepConv2D is a Conv2D implementation specialized for deep (i.e. large
// in_depth * out_depth product) convolutions (see deep_conv2d.cc for details).
// DeepConv2DTransform is an interface for implementing transforms for


@ -44,7 +44,7 @@ class HingeLossUpdater : public DualLossUpdater {
const double current_dual, const double wx,
const double weighted_example_norm) const final {
// Intutitvely there are 3 cases:
// a. new optimal value of the dual variable falls withing the admissible
// a. new optimal value of the dual variable falls within the admissible
// range [0, 1]. In this case we set new dual to this value.
// b. new optimal value is < 0. Then, because of convexity, the optimal
// valid value for new dual = 0


@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This is a helper struct to package up the input and ouput
// This is a helper struct to package up the input and output
// parameters of an image resizer (the height, widths, etc.). To
// reduce code duplication and ensure consistency across the different
// resizers, it performs the input validation.


@ -238,7 +238,7 @@ private:
{
IncompleteTuple empty(dtypes_.size());
// Initialise empty tuple with given dta
// Initialize empty tuple with given dta
for(std::size_t i = 0; i < findices.dimension(0); ++i)
{
std::size_t index = findices(i);


@ -64,7 +64,7 @@ bool IsSupportedAndEnabled();
// sum((a_data[i, l] + offset_a) * (b_data[l, j] + offset_b)) : l in [0, k)
//
// If transpose_a is false the lhs operand has row major layout, otherwise
// column major. Similarily transpose_b describes the layout of the rhs operand.
// column major. Similarly transpose_b describes the layout of the rhs operand.
// lda, ldb, and ldc are the strides of the lhs operand, rhs operand and the
// result arrays.
void QuantizedGemm(OpKernelContext* context, bool transpose_a, bool transpose_b,


@ -295,7 +295,7 @@ class MklConv2DCustomBackpropInputOp : public OpKernel {
dnnDelete_F32(mkl_convert_filter);
} else {
// If we do not need any layout conversion for filter, then
// we direclty assign input filter to resources[].
// we directly assign input filter to resources[].
conv_res[dnnResourceFilter] =
static_cast<void*>(const_cast<T*>(filter.flat<T>().data()));
}


@ -306,7 +306,7 @@ class PaddedBatchDatasetOp : public OpKernel {
const TensorShape& element_shape =
batch_elements[i][component_index].shape();
// TODO(mrry): Perform this check in the shape function if
// enough static information is avaiable to do so.
// enough static information is available to do so.
if (element_shape.dims() != padded_shape.dims()) {
return errors::InvalidArgument(
"All elements in a batch must have the same rank as the "


@ -80,7 +80,7 @@ float QuantizedToFloat(T input, float range_min, float range_max) {
static_cast<int64>(Eigen::NumTraits<T>::lowest());
const double offset_input = static_cast<double>(input) - lowest_quantized;
// For compatibility with DEQUANTIZE_WITH_EIGEN, we should convert
// range_scale to a float, otherwise range_min_rounded might be slighly
// range_scale to a float, otherwise range_min_rounded might be slightly
// different.
const double range_min_rounded =
round(range_min / static_cast<float>(range_scale)) *


@ -35,7 +35,7 @@ class SmoothHingeLossUpdater : public DualLossUpdater {
const double current_dual, const double wx,
const double weighted_example_norm) const final {
// Intutitvely there are 3 cases:
// a. new optimal value of the dual variable falls withing the admissible
// a. new optimal value of the dual variable falls within the admissible
// range [0, 1]. In this case we set new dual to this value.
// b. new optimal value is < 0. Then, because of convexity, the optimal
// valid value for new dual = 0


@ -24,7 +24,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
// TOOD(zongheng): this should be a general functor that powers SparseAdd and
// TODO(zongheng): this should be a general functor that powers SparseAdd and
// ScatterNd ops. It should be moved to its own head file, once the other ops
// are implemented.
template <typename Device, typename T, typename Index, int NDIMS,


@ -541,7 +541,7 @@ class optional : private internal_optional::optional_data<T>,
// opt.emplace(arg1,arg2,arg3); (Constructs Foo(arg1,arg2,arg3))
//
// If the optional is non-empty, and the `args` refer to subobjects of the
// current object, then behaviour is undefined. This is because the current
// current object, then behavior is undefined. This is because the current
// object will be destructed before the new object is constructed with `args`.
//
template <typename... Args,
@ -586,7 +586,7 @@ class optional : private internal_optional::optional_data<T>,
// [optional.observe], observers
// You may use `*opt`, and `opt->m`, to access the underlying T value and T's
// member `m`, respectively. If the optional is empty, behaviour is
// member `m`, respectively. If the optional is empty, behavior is
// undefined.
constexpr const T* operator->() const { return this->pointer(); }
T* operator->() {

Some files were not shown because too many files have changed in this diff Show More