minor spelling tweaks

Kazuaki Ishizaki 2020-03-11 02:31:27 +09:00
parent 49eac7f8e2
commit c6880467be
12 changed files with 18 additions and 18 deletions


@@ -154,7 +154,7 @@ class MnistLstmModel(object):
"""Build the model using the given configs.
Returns:
-x: The input placehoder tensor.
+x: The input placeholder tensor.
logits: The logits of the output.
output_class: The prediction.
"""


@@ -163,7 +163,7 @@ class TFLiteLSTMCell(rnn_cell_impl.LayerRNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
This is used only for TfLite, it provides hints and it also makes the
-variables in the desired for the tflite ops (transposed and seaparated).
+variables in the desired for the tflite ops (transposed and separated).
The default non-peephole implementation is based on:


@@ -29,7 +29,7 @@ limitations under the License.
namespace tflite {
-// Forward declaraction for op kernels.
+// Forward declaration for op kernels.
namespace ops {
namespace custom {
@@ -505,7 +505,7 @@ class HashtableGraph {
TestErrorReporter error_reporter_;
};
-// HashtableDefaultGraphTest tests hash table feautres on a basic graph, created
+// HashtableDefaultGraphTest tests hash table features on a basic graph, created
// by the HashtableGraph class.
template <typename KeyType, typename ValueType>
class HashtableDefaultGraphTest {


@@ -63,10 +63,10 @@ typedef NS_ENUM(NSUInteger, TFLInterpreterErrorCode) {
/** Failed to allocate memory for tensors. */
TFLInterpreterErrorCodeFailedToAllocateTensors,
-/** Operaton not allowed without allocating memory for tensors first. */
+/** Operation not allowed without allocating memory for tensors first. */
TFLInterpreterErrorCodeAllocateTensorsRequired,
-/** Operaton not allowed without invoking the interpreter first. */
+/** Operation not allowed without invoking the interpreter first. */
TFLInterpreterErrorCodeInvokeInterpreterRequired,
};
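The two "Operation not allowed" error codes above encode the ordering the interpreter enforces: memory for tensors must be allocated before data is copied in, and the interpreter must be invoked before outputs are read. As a point of reference, here is a minimal sketch of that lifecycle through the C++ tflite::Interpreter API (the model path and the float tensor type are placeholder assumptions; this is not the Objective-C wrapper's code):

#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

// Sketch only: build -> AllocateTensors -> fill input -> Invoke -> read output.
int main() {
  auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
  if (!model) return 1;
  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  tflite::InterpreterBuilder(*model, resolver)(&interpreter);
  if (!interpreter) return 1;
  // Allocate first; copying data before this is the AllocateTensorsRequired case.
  if (interpreter->AllocateTensors() != kTfLiteOk) return 1;
  float* input = interpreter->typed_input_tensor<float>(0);
  input[0] = 1.0f;  // assumes a float input tensor with at least one element
  // Invoke before reading outputs; otherwise InvokeInterpreterRequired applies.
  if (interpreter->Invoke() != kTfLiteOk) return 1;
  const float* output = interpreter->typed_output_tensor<float>(0);
  (void)output;
  return 0;
}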


@@ -39,7 +39,7 @@ class TensorReader {
const T* input_data_;
};
-/// Helper class for accesing TFLite tensor data. This specialized class is for
+/// Helper class for accessing TFLite tensor data. This specialized class is for
/// std::string type.
template <>
class TensorReader<std::string> {
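The specialization exists because TFLite string tensors store an offset table plus character data rather than a flat array of fixed-size elements. For context, a hedged sketch of pulling strings out of a tensor with the helpers from tensorflow/lite/string_util.h (illustrative only, not the TensorReader<std::string> implementation from this file):

#include <string>
#include <vector>

#include "tensorflow/lite/string_util.h"

// Collects every string stored in a string-typed TfLiteTensor. Sketch only.
std::vector<std::string> ReadAllStrings(const TfLiteTensor* tensor) {
  std::vector<std::string> result;
  const int count = tflite::GetStringCount(tensor);
  result.reserve(count);
  for (int i = 0; i < count; ++i) {
    const tflite::StringRef ref = tflite::GetString(tensor, i);
    result.emplace_back(ref.str, ref.len);
  }
  return result;
}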


@@ -567,7 +567,7 @@ inline void PackFloatAvx2Packer(const float* src_ptr, const float* zerobuf,
RUY_DCHECK_EQ(PackImplFloatAvx2::Layout::kCols, 8);
RUY_DCHECK_EQ(PackImplFloatAvx2::Layout::kRows, 1);
-// This packing amounts to tranposition of 8x8 blocks.
+// This packing amounts to transposition of 8x8 blocks.
static constexpr int kPackCols = 8; // Source cols packed together.
static constexpr int kPackRows = 8; // Short input is padded.
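The "transposition of 8x8 blocks" comment is the key idea: each packed block is the source block with rows and columns swapped, which the AVX2 code achieves with shuffles and permutes rather than loops. A plain scalar sketch of that per-block operation (names and layout here are illustrative assumptions, not ruy's packer):

// Element (r, c) of the source block becomes element (c, r) of the packed
// block. src_stride is the distance in floats between consecutive source rows.
constexpr int kBlockDim = 8;

void TransposeBlock8x8(const float* src, int src_stride, float* dst) {
  for (int r = 0; r < kBlockDim; ++r) {
    for (int c = 0; c < kBlockDim; ++c) {
      dst[c * kBlockDim + r] = src[r * src_stride + c];
    }
  }
}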


@@ -57,7 +57,7 @@ limitations under the License.
//
// These are mostly sub-selections of architectures.
-// Detect NEON. Explictly avoid emulation, or anything like it, on x86.
+// Detect NEON. Explicitly avoid emulation, or anything like it, on x86.
#if (defined(__ARM_NEON) || defined(__ARM_NEON__)) && !RUY_PLATFORM(X86)
#define RUY_DONOTUSEDIRECTLY_NEON 1
#else


@@ -252,7 +252,7 @@ Scalar Parametrized(float param) {
template <typename Scalar>
struct RandomRangeBounds<Scalar, false> {
static Scalar GetMinBound(RandomRange range) {
-static constexpr double offcentredness =
+static constexpr double offcenteredness =
0.02; // Shift lower limit by about 5 for range of 255.
switch (range) {
case RandomRange::kGeneral:
@@ -262,8 +262,8 @@ struct RandomRangeBounds<Scalar, false> {
case RandomRange::kOffCenterAvoidMinValue:
return 1 + std::numeric_limits<Scalar>::lowest() +
static_cast<Scalar>(
-offcentredness * std::numeric_limits<Scalar>::max() -
-offcentredness *
+offcenteredness * std::numeric_limits<Scalar>::max() -
+offcenteredness *
(std::numeric_limits<Scalar>::lowest() + 1));
case RandomRange::kReasonableSrcZeroPoint:
return std::numeric_limits<Scalar>::lowest();
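To make the "shift lower limit by about 5" comment concrete, the same expression can be evaluated for Scalar = int8_t in a standalone sketch (this is not part of the ruy test, just the arithmetic spelled out):

#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  using Scalar = std::int8_t;
  constexpr double offcenteredness = 0.02;
  const int lowest = std::numeric_limits<Scalar>::lowest();   // -128
  const int highest = std::numeric_limits<Scalar>::max();     // 127
  const int bound =
      1 + lowest +
      static_cast<Scalar>(offcenteredness * highest -
                          offcenteredness * (lowest + 1));
  // 0.02 * 127 - 0.02 * (-127) = 5.08, truncated to 5 by the cast, so the
  // bound is 1 - 128 + 5 = -122: roughly 5 above lowest + 1, as the comment says.
  std::cout << bound << "\n";  // prints -122
}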


@@ -129,16 +129,16 @@ class TuningResolver {
// access to that.
friend class TuneTool;
// Actually runs a nano-benchmark, producing a real number called 'ratio'
-// whose meaning is generally opaque / implemenation defined. Typically,
+// whose meaning is generally opaque / implementation defined. Typically,
// this would be the ratio between the latencies of two different
// pieces of asm code differing only by the ordering of instructions,
// revealing whether the CPU cares about such ordering details.
-// An implemenation may just return a dummy value if it is not based on
+// An implementation may just return a dummy value if it is not based on
// such nanobenchmarking / ratio evaluation.
float EvalRatio();
// Empirically determined threshold on ratio values delineating
// out-of-order (ratios closer to 1) from in-order (ratios farther from 1).
-// An implemenation may just return a dummy value if it is not based on
+// An implementation may just return a dummy value if it is not based on
// such nanobenchmarking / ratio evaluation.
float ThresholdRatio();
// Perform the tuning resolution now. That may typically use EvalRatio and
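Read together, the comments above describe the resolution rule: a measured ratio close to 1 means the instruction reordering barely mattered (suggesting an out-of-order core), while a ratio far from 1 means it did (suggesting an in-order core). A hypothetical sketch of that comparison follows; the enum, the distance metric, and the comparison direction are illustrative assumptions, not ruy's actual logic:

#include <cmath>

// Hypothetical classification based on the comments above. Sketch only.
enum class CoreKind { kOutOfOrder, kInOrder };

CoreKind Classify(float eval_ratio, float threshold_ratio) {
  // Ratios near 1: reordering made little difference, which the comments
  // associate with out-of-order cores.
  return std::abs(eval_ratio - 1.0f) < std::abs(threshold_ratio - 1.0f)
             ? CoreKind::kOutOfOrder
             : CoreKind::kInOrder;
}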


@@ -363,7 +363,7 @@ public abstract class TensorBuffer {
return true;
}
-// This shape refers to a multidimentional array.
+// This shape refers to a multidimensional array.
for (int s : shape) {
// All elements in shape should be non-negative.
if (s < 0) {


@@ -270,7 +270,7 @@ extension Interpreter {
}
}
-/// A type alias for `Interpreter.Options` to support backwards compatiblity with the deprecated
+/// A type alias for `Interpreter.Options` to support backwards compatibility with the deprecated
/// `InterpreterOptions` struct.
@available(*, deprecated, renamed: "Interpreter.Options")
public typealias InterpreterOptions = Interpreter.Options


@@ -44,7 +44,7 @@ extension InterpreterError: LocalizedError {
case .failedToCreateInterpreter:
return "Failed to create the interpreter."
case .failedToResizeInputTensor(let index):
return "Failed to resize input tesnor at index \(index)."
return "Failed to resize input tensor at index \(index)."
case .failedToCopyDataToInputTensor:
return "Failed to copy data to input tensor."
case .failedToAllocateTensors: