Fixed Typos (#18806)

* fixed typos
Authored by Nicholas Nadeau, P.Eng., AVS on 2018-05-03 13:47:06 -04:00; committed by Shanqing Cai
parent 4984a60e71
commit 487fa7b1a4
86 changed files with 97 additions and 98 deletions

@@ -236,7 +236,7 @@ Yoni Tsafir, yordun, Yuan (Terry) Tang, Yuxin Wu, zhengdi, Zhengsheng Wei, 田
* Add `complex64` support to XLA compiler.
* `bfloat` support is now added to XLA infrastructure.
* Make `ClusterSpec` propagation work with XLA devices.
-* Use a determinisitic executor to generate XLA graph.
+* Use a deterministic executor to generate XLA graph.
* `tf.contrib`:
* `tf.contrib.distributions`:
* Add `tf.contrib.distributions.Autoregressive`.

@@ -240,7 +240,7 @@ class Encapsulator {
// Once edges between compiled and outside_compilation clusters have been
// replaced by send/recv ops, some dependencies may no longer be apparent.
// A clustering pass finds all the dependencies between HC nodes that are only
-// present as a result of edges between nodes in outside_compilaton clusters.
+// present as a result of edges between nodes in outside_compilation clusters.
// Suppose there is a path from outside_compilation cluster C in subgraph S
// to outside_compilation cluster D in subgraph T. If S != T then a control
// edge is added from the call node for S to the call node for T, which

@@ -286,7 +286,7 @@ class Literal {
// Creates a new value that has the equivalent value as this literal, but
// conforms to new_layout; e.g. a literal matrix that was in {0, 1}
-// minor-to-major dimension layout can be re-layed-out as {1, 0}
+// minor-to-major dimension layout can be re-laid-out as {1, 0}
// minor-to-major dimension layout and the value in the cell at any given
// logical index (i0, i1) will be the same.
//

@@ -35,7 +35,7 @@ namespace xla {
// Tries to replace a conditional with a call operation of the corresponding
// computation. If the given conditional has a constant predicate, tries to
-// replace it with a call to its true/false computation as appropirate and then
+// replace it with a call to its true/false computation as appropriate and then
// inline that computation.
//
// Returns true if it made a change to the graph.

@@ -33,8 +33,8 @@ namespace cpu {
// emitters for function and function argument access.
// The llvm::Function is created with the standard function signature
// used in the XLA CPU backend (see ir_function.cc for argument details).
-// In addtion IrFunction saves the callers IR insert point during contruction,
-// and restores it after desctruction.
+// In addition IrFunction saves the callers IR insert point during construction,
+// and restores it after destruction.
//
// Example usage:
//

@@ -38,7 +38,7 @@ namespace cpu {
//
// [0, 1), [1, 2), [2, 3), [3, 4), [4, 5) [5, 8)
//
-// Note that the last partition has residule because the dimension size is
+// Note that the last partition has residual because the dimension size is
// not a multiple of the partition count.
//
//

@@ -25,7 +25,7 @@ namespace xla {
// Creates an HloPassPipeline containing multiple HloPasses that can
// despecialize an optimized HloModule. This is useful to run an HloModule
-// optimized for one specfic platform on a different platform (undoing platform
+// optimized for one specific platform on a different platform (undoing platform
// specific passes) with matching numerics for comparison.
//
// Current despecialization passes are Defuser, ImplicitBroadcastRemover,

@@ -38,7 +38,7 @@ namespace gpu {
//
// Examples of things that are not unnested computations:
//
-// - The reducer of a kReduce HLO. This is emited using IrEmitterNested.
+// - The reducer of a kReduce HLO. This is emitted using IrEmitterNested.
// - The body of a fusion node. IrEmitterUnenested emits the relevant code
// within a kernel function using FusedIrEmitter. (FusedIrEmitter is not
// really an IrEmitter, but is more an "IR generator generator".)

@@ -5,7 +5,7 @@ evaluating the result of the HLO graph directly with HloEvaluator, without
lowering it further (to LLVM IR for example) before execution as other backends
(CPU and GPU for example) do.
-Its key componenets are:
+Its key components are:
* [`InterpreterCompiler`] despite the inherited naming of "compiler", all
`InterpreterCompiler` really does is the following:

@@ -281,8 +281,8 @@ class LayoutAssignment : public HloPassInterface {
// the case that no particular layout is requested.
//
// channel_constraints is both an input and output. Any sends or recvs that
-// are present in channel_constraints will be layed out as constrained. Any
-// unconstrained sends or recvs will be layed out as locally optimal and their
+// are present in channel_constraints will be laid out as constrained. Any
+// unconstrained sends or recvs will be laid out as locally optimal and their
// layout will be added as a constraint to channel_constraints.
//
// If channel_constraints is nullptr, no kSend or kRecvs must be contained

@@ -75,7 +75,7 @@ StatusOr<bool> ReducePrecisionInsertion::insert_after(
return false;
}
-// Check that we haven't already inserted an equivalant reduce-precision
+// Check that we haven't already inserted an equivalent reduce-precision
// operation after this instruction. (The zero-user case occurs when this is
// the root instruction.)
if (instruction->user_count() > 0) {

@@ -23,7 +23,7 @@ limitations under the License.
namespace xla {
namespace source_map_util {
-// Creates an INVALID_ARUGMENT status with the given format string.
+// Creates an INVALID_ARGUMENT status with the given format string.
//
// Also, attempts to extract the OpMetadata for parameter_number on executable
// and append it to the status message for source mapping to user code.

@@ -33,7 +33,7 @@ DEFAULT_UNCOMPILED_MODULES = set((
(utils.__name__,),
# All of tensorflow's subpackages. Unlike the root tf module, they don't
-# have well-known names. Not refering to the module directly to avoid
+# have well-known names. Not referring to the module directly to avoid
# circular imports.
(
utils.__name__[:-len('.contrib.autograph.utils')],),

@@ -174,7 +174,7 @@ def while_stmt(test, body, init_state, extra_deps, opts=None):
Tuple containing the final state.
"""
# TODO(mdan): Consider adding a generic mechanism for dynamic dispatch.
-# That could be somethins as simple as a collection of dispatch rules, with
+# That could be something as simple as a collection of dispatch rules, with
# some prioritization.
if any(tensor_util.is_tensor(v) for v in init_state + extra_deps):
return _tf_while_stmt(test, body, init_state, opts)

@@ -369,7 +369,7 @@ class GradientBoostedDecisionTreeModel(object):
Returns:
a dictionary of prediction results -
ENSEMBLE_STAMP, PREDICTION, PARTITION_IDS,
-NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPED.
+NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPTED.
"""
ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle,
ensemble_stamp)

@@ -53,7 +53,7 @@ class CholeskyOuterProduct(bijector.Bijector):
its spectrum), and that the product of two positive-diagonal lower-triangular
matrices is another positive-diagonal lower-triangular matrix.
-A simple inductive argument (proceding one column of L_3 at a time) shows
+A simple inductive argument (proceeding one column of L_3 at a time) shows
that, if `I = L_3 @ L_3.T`, with L_3 being lower-triangular with positive-
diagonal, then `L_3 = I`. Thus, `L_1 = L_2`, proving injectivity of g.

@@ -1,6 +1,6 @@
# Eager Execution
-Eager execution provides an imperative interface to TensorFlow (similiar to
+Eager execution provides an imperative interface to TensorFlow (similar to
[NumPy](http://www.numpy.org)). When you enable eager execution, TensorFlow
operations execute immediately; you do not execute a pre-constructed graph with
[`Session.run()`](https://www.tensorflow.org/api_docs/python/tf/Session).

@@ -53,7 +53,7 @@ Status CreateAudioFile(const string& audio_format_id, int32 bits_per_second,
int32 samples_per_second, int32 channel_count,
const std::vector<float>& samples, string* output_data);
-// Reads an video file using ffmpeg adn converts it into a RGB24 in uint8
+// Reads an video file using ffmpeg and converts it into a RGB24 in uint8
// [frames, height, width, 3]. The w, h, and frames are obtained from ffmpeg.
Status ReadVideoFile(const string& filename, std::vector<uint8>* output_data,
uint32* width, uint32* height, uint32* frames);

@@ -202,7 +202,7 @@ class CriticalSection(object):
or lazy way that may cause a deadlock.
ValueError: If `exclusive_resource_access` is not provided (is `True`) and
another `CriticalSection` has an execution requesting the same
-resources as in `*args`, `**kwargs`, and any additionaly captured
+resources as in `*args`, `**kwargs`, and any additionally captured
inputs in `fn`. Note, even if `exclusive_resource_access` is `True`,
if another execution in another `CriticalSection` was created without
`exclusive_resource_access=True`, a `ValueError` will be raised.

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Miscellanous utilities for TFGAN code and examples."""
+"""Miscellaneous utilities for TFGAN code and examples."""
from __future__ import absolute_import
from __future__ import division

@@ -677,7 +677,7 @@ def copy_with_input_replacements(sgv, replacement_ts,
def _add_control_flow_ops(ops, control_ios):
-"""Complete `ops` so that the tranformed graph is valid.
+"""Complete `ops` so that the transformed graph is valid.
Partially copying a graph can lead to a malformed graph. For instance,
copying half of a while construct is likely to result in an invalid graph.

@@ -17,7 +17,7 @@
### API
This module provides functions for image manipulation; currently, chrominance
-transformas (including changing saturation and hue) in YIQ space and
+transforms (including changing saturation and hue) in YIQ space and
projective transforms (including rotation) are supported.
## Image Transformation `Ops`

@@ -325,7 +325,7 @@ def distributed_grads_only_and_ops_chief_worker(
All workers perform gradient computation. Chief worker applies gradient after
averaging the gradients obtained from all the workers. All workers block
-execution untill the update is applied. Chief worker runs covariance and
+execution until the update is applied. Chief worker runs covariance and
inverse update ops. Covariance and inverse matrices are placed on parameter
servers in a round robin manner. For further details on synchronous
distributed optimization check `tf.train.SyncReplicasOptimizer`.

@@ -66,7 +66,7 @@ class KfacOptimizer(gradient_descent.GradientDescentOptimizer):
the local approximation with the Fisher information matrix, and to
regularize the update direction by making it closer to the gradient.
If damping is adapted during training then this value is used for
-initializing damping varaible.
+initializing damping variable.
(Higher damping means the update looks more like a standard gradient
update - see Tikhonov regularization.)
layer_collection: The layer collection object, which holds the fisher
@@ -195,7 +195,7 @@ class KfacOptimizer(gradient_descent.GradientDescentOptimizer):
min_damping: `float`(Optional), Minimum value the damping parameter
can take. Default value 1e-5.
damping_adaptation_decay: `float`(Optional), The `damping` parameter is
-multipled by the `damping_adaptation_decay` every
+multiplied by the `damping_adaptation_decay` every
`damping_adaptation_interval` number of iterations. Default value 0.99.
damping_adaptation_interval: `int`(Optional), Number of steps in between
updating the `damping` parameter. Default value 5.

@@ -51,7 +51,7 @@ class RoundRobinPlacementMixin(object):
self._inv_devices = inv_devices
def make_vars_and_create_op_thunks(self, scope=None):
-"""Make vars and create op thunks w/ a round-robin device placement strat.
+"""Make vars and create op thunks w/ a round-robin device placement start.
For each factor, all of that factor's cov variables and their associated
update ops will be placed on a particular device. A new device is chosen

@@ -1814,7 +1814,7 @@ inline void LstmCell(const float* input_data, const Dims<4>& input_dims,
// requiring a power-of-two representation interval. Thus, we should right
// away quantize this array to a power-of-two interval; otherwise,
// implementation will need to rescale that, losing any benefit that a tighter
-// representation interval might otherwise yield, while introducting some
+// representation interval might otherwise yield, while introducing some
// numerical error and computational overhead.
//
// Now, Logistic and Tanh

@@ -65,7 +65,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.

@@ -48,7 +48,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.

@@ -53,7 +53,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.

@@ -54,7 +54,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.

@@ -53,7 +53,7 @@ table Tensor {
type:TensorType;
// An index that refers to the buffers table at the root of the model. Or,
// if there is no data buffer associated (i.e. intermediate results), then
-// this is 0 (which refers to an always existant empty buffer).
+// this is 0 (which refers to an always existent empty buffer).
//
// The data_buffer itself is an opaque container, with the assumption that the
// target device is little-endian. In addition, all builtin operators assume
@@ -64,7 +64,7 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
}
-// A list of builtin operators. Builtin operators a slighlty faster than custom
+// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.

@@ -1758,7 +1758,7 @@ def make_strided_slice_tests(zip_path):
"shrink_axis_mask": [None, 1, 8, 11, 15, -1],
"constant_indices": [False, True],
},
-# TODO(b/73170889) Restore test paramaters removed in cl/191608113.
+# TODO(b/73170889) Restore test parameters removed in cl/191608113.
# 2-D
{
"dtype": [tf.float32, tf.int32, tf.int64],
@@ -1899,7 +1899,7 @@ def make_lstm_tests(zip_path):
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
-"""Feed inputs, assign vairables, and freeze graph."""
+"""Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")

@@ -226,8 +226,8 @@ void TfLiteDriver::SetExpectation(int id, const string& csv_values) {
if (!IsValid()) return;
auto* tensor = interpreter_->tensor(id);
if (expected_output_.count(id) != 0) {
-fprintf(stderr, "Overriden expectation for tensor %d\n", id);
-Invalidate("Overriden expectation");
+fprintf(stderr, "Overridden expectation for tensor %d\n", id);
+Invalidate("Overridden expectation");
}
expected_output_[id].reset(new Expectation);
switch (tensor->type) {

@@ -115,7 +115,7 @@ bazel run --config=opt \
In order to evaluate the possible benefit of generating a quantized graph, TOCO
allows "dummy-quantization" on float graphs. The flags `--default_ranges_min`
-and `--default_ranges_max` accept plausable values for the min-max ranges of the
+and `--default_ranges_max` accept plausible values for the min-max ranges of the
values in all arrays that do not have min-max information. "Dummy-quantization"
will produce lower accuracy but will emulate the performance of a correctly
quantized model.
@@ -338,7 +338,7 @@ below outline the use cases for each.
### Using `--output_format=GRAPHVIZ_DOT`
The first way to get a graphviz rendering is to pass `GRAPHVIZ_DOT` into
-`--output_format`. This results in a plausable visualization of the graph. This
+`--output_format`. This results in a plausible visualization of the graph. This
reduces the requirements that normally exist during conversion between other
input and output formats. For example, this may be useful if conversion from
TENSORFLOW_GRAPHDEF to TFLITE is failing.

@@ -144,7 +144,7 @@ ArrayDataType ConvertDataType(tensorflow::DataType dtype) {
else if (dtype == DT_STRING)
return ArrayDataType::kString;
else
-LOG(INFO) << "Unsupported data type in placehoder op: " << dtype;
+LOG(INFO) << "Unsupported data type in placeholder op: " << dtype;
return ArrayDataType::kNone;
}

@@ -25,10 +25,10 @@ namespace tflite {
class BaseOperator;
-// Return a map contained all knwo TF Lite Operators, keyed by their names.
+// Return a map contained all know TF Lite Operators, keyed by their names.
std::map<string, std::unique_ptr<BaseOperator>> BuildOperatorByNameMap();
-// Return a map contained all knwo TF Lite Operators, keyed by the type of
+// Return a map contained all know TF Lite Operators, keyed by the type of
// their tf.mini counterparts.
std::map<OperatorType, std::unique_ptr<BaseOperator>> BuildOperatorByTypeMap();

@@ -44,7 +44,7 @@ template <ArrayDataType T>
Array ToFlatBufferAndBack(std::initializer_list<::toco::DataType<T>> items) {
// NOTE: This test does not construct the full buffers list. Since
// Deserialize normally takes a buffer, we need to synthesize one and provide
-// an index that is non-zero so the buffer is not assumed to be emtpy.
+// an index that is non-zero so the buffer is not assumed to be empty.
Array src;
src.data_type = T;
src.GetMutableBuffer<T>().data = items;

@@ -58,7 +58,7 @@ def create_local_cluster(num_workers, num_ps, protocol="grpc"):
# Creates the workers and return their sessions, graphs, train_ops.
-# Cheif worker will update at last
+# Chief worker will update at last
def _get_workers(num_workers, period, workers, moving_rate):
sessions = []
graphs = []

@@ -57,7 +57,7 @@ def create_local_cluster(num_workers, num_ps, protocol="grpc"):
# Creates the workers and return their sessions, graphs, train_ops.
-# Cheif worker will update at last
+# Chief worker will update at last
def _get_workers(num_workers, steps, workers):
sessions = []
graphs = []

@@ -2145,7 +2145,7 @@ tensorflow::Status ConvertCalibrationNodeToEngineNode(
if (!status.ok() || !calib_res->calibrator_) {
return tensorflow::errors::FailedPrecondition(
"You must run calibration"
-" and inference conversion in the same proces");
+" and inference conversion in the same process");
}
calib_res->calibrator_->setDone();

@@ -159,7 +159,7 @@ When the receiver receives the RDMA write, it will locate the relevant **RdmaTen
* step_id - Step ID.
* request_index - Request index.
* remote_addr/rkey - Address/rkey of the reallocated result/proxy tensor.
-* **RDMA_MESSAGE_ERROR_STATUS** - (sender ==> receiver) Notify the receiver that an error had occured on the sender side, so it can propagate it to the upper levels.
+* **RDMA_MESSAGE_ERROR_STATUS** - (sender ==> receiver) Notify the receiver that an error had occurred on the sender side, so it can propagate it to the upper levels.
* type - The message type.
* name (name_size) - Name of the requested tensor.
* step_id - Step ID.

@@ -80,7 +80,7 @@ void Broadcaster::Run(StatusCallback done) {
// continuing to occupy its current position. Hence we calculate as
// though each device's rank is actually r+1, then subtract 1 again to
// get the descendent ranks. If the source is not rank 0 then its
-// decendents include both {0,1} and the descendents of its current
+// descendants include both {0,1} and the descendents of its current
// position. Where a non-0-rank source is a descendent of another
// device, no send to it is necessary.
@@ -115,7 +115,7 @@ void Broadcaster::TreeSendTo(const CollectiveParams& cp,
DCHECK_NE(successor_rank, my_rank);
if (cp.is_source && source_rank != 0) {
// The source sends to rank 0,1 in addition to its positional
-// decendents.
+// descendants.
if (cp.group.group_size > 1) {
targets->push_back(0);
}

@@ -79,7 +79,7 @@ class BufRendezvous {
const ProducerCallback& done);
// Called to request access to a Tensor value corresponding to key.
-// Consumer is provide with a Hook as soon as availble.
+// Consumer is provide with a Hook as soon as available.
void ConsumeBuf(const string& key, const ConsumerCallback& done);
// Consumer must call this function when it's done reading the Hook provided

@@ -275,7 +275,7 @@ void RingReducer::InitRingField(RingField* rf, int chunk_idx, int subdiv_idx,
// Note on field indexing: There are group_size_ devices in the
// instance, implying the same number of chunks per tensor, where a
// chunk is the unit of data transferred in a time step. However, if
-// a device can simultaenously send data by 2 or more independent
+// a device can simultaneously send data by 2 or more independent
// channels we can speed up the transfer by subdividing chunks and
// processing multiple subdivisions at once. So the actual number
// of RingFields is group_size_ * num_subdivs_.

@@ -104,7 +104,7 @@ ScopedAllocatorContainer::~ScopedAllocatorContainer() {
// contents deleted via Drop. When when a step ends early
// (e.g. through abnormal termination) we need to clean up
// explicitly. So long as graph execution of the associated step has
-// completey terminated this should be safe.
+// completely terminated this should be safe.
for (auto& it : allocators_) {
if (it.second.field_index == ScopedAllocator::kBackingIndex) {
delete it.second.scoped_allocator;

@@ -52,7 +52,7 @@ namespace {
// Creates an Event proto representing a chunk of a Tensor. This method only
// populates the field of the Event proto that represent the envelope
-// informaion (e.g., timestmap, device_name, num_chunks, chunk_index, dtype,
+// information (e.g., timestamp, device_name, num_chunks, chunk_index, dtype,
// shape). It does not set the value.tensor field, which should be set by the
// caller separately.
Event PrepareChunkEventProto(const DebugNodeKey& debug_node_key,

@@ -115,7 +115,7 @@ class GrpcWorkerCache : public WorkerCachePartial {
size_t AssignWorkerToThread(const string& target) {
// Round-robin target assignment, but keeps the same target on the same
-// polling thread always, as this is important for gRPC performace
+// polling thread always, as this is important for gRPC performance
mutex_lock lock(assignment_mu_);
auto it = target_assignments_.find(target);
if (it == target_assignments_.end()) {

@@ -59,14 +59,14 @@ class ApiDefMap {
// You can call this method multiple times to load multiple
// sets of files. Api definitions are merged if the same
// op definition is loaded multiple times. Later-loaded
-// definitions take precedense.
+// definitions take precedence.
// ApiDefs loaded from files must contain a subset of ops defined
// in the OpList passed to the constructor.
Status LoadFileList(Env* env, const std::vector<string>& filenames);
// Load a single file. Api definitions are merged if the same
// op definition is loaded multiple times. Later-loaded
-// definitions take precedense.
+// definitions take precedence.
// ApiDefs loaded from file must contain a subset of ops defined
// in the OpList passed to the constructor.
Status LoadFile(Env* env, const string& filename);

@@ -534,7 +534,7 @@ class OpKernelContext {
Rendezvous* rendezvous = nullptr;
// Mechanism for executing a collective op that needs to coordinate
-// with parallel instances runing on other devices.
+// with parallel instances running on other devices.
CollectiveExecutor* collective_executor = nullptr;
// The session state for this op.

@@ -31,7 +31,7 @@ namespace tensorflow {
// future to support these features.
//
// TODO(skyewm): de/serialize in MetaGraphDef so imported while loops will be
-// differentiable. Figure out backwards compatability story.
+// differentiable. Figure out backwards compatibility story.
class WhileContext {
public:
WhileContext(StringPiece frame_name, std::vector<Node*> enter_nodes,

@@ -574,7 +574,6 @@ class SymbolicShapeRefiner {
}
};
// Compute the shape of the tensors outputed by node 'node' at output port
// 'port_index' as the union of shape1 and shape2.
ShapeHandle OutputAsUnion(const NodeDef* node, int port_index,
ShapeHandle shape1, ShapeHandle shape2) {
@@ -968,7 +967,7 @@ Status GraphProperties::PropagateShapes(
const std::unordered_map<const NodeDef*, const NodeDef*>& resource_handles,
int num_loops) const {
// Limit the number of iterations to prevent infinite loops in the presence of
-// incorrect shape functions. The algoritm should converge in at most
+// incorrect shape functions. The algorithm should converge in at most
// num_nested_loops^2 * max_rank. We approximate max_rank with the constant 4.
// The same applies to resources.
VLOG(1) << "Propagating " << new_shapes->size() << " new shapes through "

@@ -328,7 +328,7 @@ class VirtualScheduler {
Costs graph_costs_; // Graph cost.
std::map<string, Costs> op_to_cost_; // Per-op cost.
-// Auxilliary data structures for constructing NodeState and DeviceState.
+// Auxiliary data structures for constructing NodeState and DeviceState.
GraphProperties graph_properties_;
Cluster* cluster_; // Not owned.

@@ -2183,7 +2183,7 @@ Status LayoutOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
TuningConfig config;
config.no_gemm = true;
-// TODO(yaozhang): Enable tuning with various TuningConfig choices wtih
+// TODO(yaozhang): Enable tuning with various TuningConfig choices with
// the measurement-based estimator.
status = Tune(item, graph_properties, config, output);
if (!status.ok()) {

@@ -76,7 +76,7 @@ class AdaptiveSharedBatchScheduler
AdaptiveSharedBatchScheduler<TaskType>> {
public:
~AdaptiveSharedBatchScheduler() {
-// Finish processing batches before destorying other class members.
+// Finish processing batches before destroying other class members.
batch_thread_pool_.reset();
}

@@ -595,7 +595,7 @@ constexpr bool TileSizeOnNonLongSideFrontier(int TileLongSide,
// For a tile size combination (longside, shortside), lying on the frontier
// implies that (longside, shortside) is on or within the frontier but
// (longside*2, shortside) or (longside, shortside+1) is not. With the above
-// critereon, we simply need to use !TileSizeOnLongSideFrontier to ensure that
+// criterion, we simply need to use !TileSizeOnLongSideFrontier to ensure that
// it is not on the long side frontier.
return !TileSizeOutsideFrontier(TileLongSide, TileShortSide, size_of_t) &&
(TileSizeOutsideFrontier(TileLongSide * 2, TileShortSide, size_of_t) ||

@@ -114,7 +114,7 @@ struct NthElementFunctor<CPUDevice, T> {
auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
// The average time complexity of partition-based nth_element (BFPRT) is
-// O(n), althought the worst time complexity could be O(n^2). Here, 20 is a
+// O(n), although the worst time complexity could be O(n^2). Here, 20 is a
// empirical factor of cost_per_unit.
Shard(worker_threads.num_threads, worker_threads.workers, num_rows,
20 * last_dim, SubNthElement);

@@ -84,7 +84,7 @@ void DoRoll(OpKernelContext* context, const int64 num_elements,
// Shard
auto worker_threads = context->device()->tensorflow_cpu_worker_threads();
// 15 - expiramentally determined with float and bool types
-const int cost_per_element = 15 * sizeof(T); // rough esitmate
+const int cost_per_element = 15 * sizeof(T); // rough estimate
Shard(worker_threads->num_threads, worker_threads->workers, num_elements,
cost_per_element, std::move(work));
}

@@ -103,7 +103,7 @@ constexpr char kResolveCacheSecs[] = "GCS_RESOLVE_REFRESH_SECS";
// The environment variable to configure the http request's connection timeout.
constexpr char kRequestConnectionTimeout[] =
"GCS_REQUEST_CONNECTION_TIMEOUT_SECS";
-// The environment varaible to configure the http request's idle timeout.
+// The environment variable to configure the http request's idle timeout.
constexpr char kRequestIdleTimeout[] = "GCS_REQUEST_IDLE_TIMEOUT_SECS";
// The environment variable to configure the overall request timeout for
// metadata requests.

@@ -132,7 +132,7 @@ class GcsThrottle {
* UpdateState updates the available_tokens_ and last_updated_secs_ variables.
*
* UpdateState should be called in order to mark the passage of time, and
-* therefore add tokens to the availble_tokens_ pool.
+* therefore add tokens to the available_tokens_ pool.
*/
void UpdateState() EXCLUSIVE_LOCKS_REQUIRED(mu_);

@@ -82,7 +82,7 @@ bazel-bin/tensorflow/core/profiler/profiler \
#
# Alternatively, user can pass separate files.
#
-# --graph_path contains the model architecutre and tensor shapes.
+# --graph_path contains the model architecture and tensor shapes.
# --run_meta_path contains the memory and time information.
# --op_log_path contains float operation and code traces.
# --checkpoint_path contains the model checkpoint data.

@@ -32,7 +32,7 @@ message RewriterConfig {
AGGRESSIVE = 3;
}
-// Enum controling the number of times to run optimizers. The default is to
+// Enum controlling the number of times to run optimizers. The default is to
// run them once.
enum NumIterationsType {
DEFAULT_NUM_ITERS = 0;

@@ -537,7 +537,7 @@ __device__ detail::ToTypeIfConvertible<U, T> CudaAtomicSub(T* ptr, U value) {
return atomicSub(ptr, value);
}
-// Specializations of substraction which add the negative value.
+// Specializations of subtraction which add the negative value.
__device__ inline float CudaAtomicSub(float* ptr, float value) {
return CudaAtomicAdd(ptr, -value);
}

@@ -1359,7 +1359,7 @@ inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
-/// in Tensorflow's TensorShape object by perserving dimension order.
+/// in Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims

@@ -61,7 +61,7 @@ enum FilterTensorFormat {
FORMAT_OIHW = 1,
// OIHW_VECT_I is the most performant tensor format for cudnn6's quantized
-// int8 convolution and fused convolution. It is analagous to the NCHW_VECT_C
+// int8 convolution and fused convolution. It is analogous to the NCHW_VECT_C
// data format. It is laid out in the same order as OIHW, except that the size
// of the Input Channels dimension is divided by 4, and a new dimension of
// size 4 is appended, which packs 4 adjacent input channel weights into an

@@ -184,7 +184,7 @@ The recommended way to read a TFRecord file is with a @{tf.data.TFRecordDataset}
dataset = dataset.map(decode)
```
-To acomplish the same task with a queue based input pipeline requires the following code
+To accomplish the same task with a queue based input pipeline requires the following code
(using the same `decode` function from the above example):
``` python

@@ -1,6 +1,6 @@
# How to run TensorFlow on S3
-Tensorflow supports reading and writing data to S3. S3 is an object storage API which is nearly ubiquitious, and can help in situations where data must accessed by multiple actors, such as in distributed training.
+Tensorflow supports reading and writing data to S3. S3 is an object storage API which is nearly ubiquitous, and can help in situations where data must accessed by multiple actors, such as in distributed training.
This document guides you through the required setup, and provides examples on usage.

@@ -212,7 +212,7 @@ handle the task then it will be difficult to train a computer to do better.
After youve solved any fundamental issues with your use case, you need to
create a labeled dataset to define what problem youre trying to solve. This
-step is extremely important, moreso than picking which model to use. You want it
+step is extremely important, more than picking which model to use. You want it
to be as representative as possible of your actual use case, since the model
will only be effective at the task you teach it. Its also worth investing in
tools to make labeling the data as efficient and accurate as possible. For

@@ -114,7 +114,7 @@ def is_sequence(seq):
NOTE(mrry): This differs from `tensorflow.python.util.nest.is_sequence()`,
which *does* treat a Python list as a sequence. For ergonomic
reasons, `tf.data` users would prefer to treat lists as
-implict `tf.Tensor` objects, and dicts as (nested) sequences.
+implicit `tf.Tensor` objects, and dicts as (nested) sequences.
Args:
seq: an input sequence.

@@ -883,7 +883,7 @@ class Estimator(object):
model_fn_lib.ModeKeys.TRAIN,
self.config)
-# TODO(anjalisridhar): Figure out how to resolve the folowing scaffold
+# TODO(anjalisridhar): Figure out how to resolve the following scaffold
# parameters: init_feed_dict, init_fn.
scaffold_list = self._distribution.unwrap(
grouped_estimator_spec.scaffold)

@@ -52,7 +52,7 @@ def _fill_array(arr, seq, fillvalue=0):
If length of seq is less than arr padded length, fillvalue used.
Args:
arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len].
-seq: Non-padded list of data sampels of shape
+seq: Non-padded list of data samples of shape
[batch_size, ..., padded_dim(None)]
fillvalue: Default fillvalue to use.
"""

@@ -48,7 +48,7 @@ should choose depends on (1) the feature type and (2) the model type.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
-"department", ["math", "philosphy", ...]), dimension=10)
+"department", ["math", "philosophy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).

@@ -2573,7 +2573,7 @@ def set_shape_and_handle_data_for_outputs(op):
When _USE_C_API = True, this is lazily called when a tensor's shape is first
requested. Usually this should work automatically, but some edge cases may
-require manaully calling this first to make sure Tensor._shape_val and
+require manually calling this first to make sure Tensor._shape_val and
Tensor._handle_data are set (e.g. manually overriding _handle_data, copying a
Tensor).
"""

@@ -674,7 +674,7 @@ def run_in_graph_and_eager_modes(__unused__=None,
Args:
-__unused__: Prevents sliently skipping tests.
+__unused__: Prevents silently skipping tests.
config: An optional config_pb2.ConfigProto to use to configure the
session when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.

@@ -115,7 +115,7 @@ class Network(base_layer.Layer):
# Entries are unique. Includes input and output layers.
self._layers = []
-# Used in symbolic mode only, only in conjonction with graph-networks
+# Used in symbolic mode only, only in conjunction with graph-networks
self._outbound_nodes = []
self._inbound_nodes = []

@@ -457,7 +457,7 @@ class TestWholeModelSaving(test.TestCase):
with h5py.File(fname, 'r') as h5file:
num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
if attr.startswith('layer_names')])
-# The chunking of layer names array should have happend.
+# The chunking of layer names array should have happened.
self.assertGreater(num_names_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
@@ -502,7 +502,7 @@ class TestWholeModelSaving(test.TestCase):
num_weight_arrays = len(
[attr for attr in h5file['model_weights']['nested_model'].attrs
if attr.startswith('weight_names')])
-# The chunking of layer names array should have happend.
+# The chunking of layer names array should have happened.
self.assertGreater(num_weight_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)

@@ -72,7 +72,7 @@ def _any_variable_initalized():
"""Check if any variable has been initialized in the Keras model.
Returns:
-boolean, True if at least one variable has been initalized, else False.
+boolean, True if at least one variable has been initialized, else False.
"""
variables = variables_module.global_variables()
for v in variables:

@@ -703,7 +703,7 @@ class FillTriangularTest(test.TestCase):
raise ValueError("Invalid shape.")
n = np.int32(n)
# We can't do: `x[..., -(n**2-m):]` because this doesn't correctly handle
-# `m == n == 1`. Hence, we do absoulte indexing.
+# `m == n == 1`. Hence, we do absolute indexing.
x_tail = x[..., (m - (n * n - m)):]
y = np.concatenate(
[x, x_tail[..., ::-1]] if upper else [x_tail, x[..., ::-1]],

@@ -93,7 +93,7 @@ class RollTest(test_util.TensorFlowTestCase):
def testNegativeAxis(self):
self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)
self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
-# Make sure negative axis shoudl be 0 <= axis + dims < dims
+# Make sure negative axis should be 0 <= axis + dims < dims
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):

@@ -1285,7 +1285,7 @@ def reduce_sum(input_tensor,
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
-Equivalent to np.sum appart the fact that numpy upcast uint8 and int32 to
+Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
int64 while tensorflow returns the same dtype as the input.
@end_compatibility
"""

@@ -734,7 +734,7 @@ class DistributionStrategy(object):
`fn` may call `tf.get_tower_context()` to access methods such as
`tower_id()` and `merge_call()`.
-`merge_call()` is used to communicate betwen the towers and
+`merge_call()` is used to communicate between the towers and
re-enter the cross-tower context. All towers pause their execution
having encountered a `merge_call()` call. After that the
`merge_fn`-function is executed. Its results are then unwrapped and

@@ -234,7 +234,7 @@ void SetDifferentKeysError(PyObject* dict1, PyObject* dict2, string* error_msg,
// Returns true iff there were no "internal" errors. In other words,
// errors that has nothing to do with structure checking.
-// If an "internal" error occured, the appropriate Python error will be
+// If an "internal" error occurred, the appropriate Python error will be
// set and the caller can propage it directly to the user.
//
// Both `error_msg` and `is_type_error` must be non-null. `error_msg` must

@@ -97,7 +97,7 @@ PyObject* AssertSameStructure(PyObject* o1, PyObject* o2, bool check_types);
// used instead. The same convention is followed in `pack_sequence_as`. This
// correctly repacks dicts and `OrderedDict`s after they have been flattened,
// and also allows flattening an `OrderedDict` and then repacking it back using
-// a correponding plain dict, or vice-versa.
+// a corresponding plain dict, or vice-versa.
// Dictionaries with non-sortable keys cannot be flattened.
//
// Args:

@@ -639,7 +639,7 @@ class CudnnSupport : public dnn::DnnSupport {
// Guards the enqueueing of DNN operations via the dnn_handle_ below, and
// access to current_dnn_stream_.
//
-// This is a public member because we need to add thread safty annotations in
+// This is a public member because we need to add thread safety annotations in
// the cudnn wrapper functions in the cc file, which need to access this
// mutex (the annotations require C++ permission checks).
mutex dnn_handle_mutex_;

@@ -1492,7 +1492,7 @@ def tf_py_wrap_cc(name,
# This macro is for running python tests against system installed pip package
# on Windows.
#
-# py_test is built as an exectuable python zip file on Windows, which contains all
+# py_test is built as an executable python zip file on Windows, which contains all
# dependencies of the target. Because of the C++ extensions, it would be very
# inefficient if the py_test zips all runfiles, plus we don't need them when running
# tests against system installed pip package. So we'd like to get rid of the deps

@@ -388,7 +388,7 @@ input is collapsed down into a simple constant.
Args:
* clear_output_shapes: Clears tensor shape information saved as attributes.
-Some older graphs containes out-of-date information and may cause import
+Some older graphs contains out-of-date information and may cause import
errors. Defaults to true.
Prerequisites: None

@@ -70,7 +70,7 @@ Other eager execution examples can be found under [tensorflow/contrib/eager/pyth
- After training, you may use the model to perform inference on input data in
the SNLI data format. The premise and hypotheses sentences are specified with
the command-line flags `--inference_premise` and `--inference_hypothesis`,
-respecitvely. Each sentence should include the words, as well as parentheses
+respectively. Each sentence should include the words, as well as parentheses
representing a binary parsing of the sentence. The words and parentheses
should all be separated by spaces. For instance,