minor spelling tweaks
parent 49eac7f8e2
commit c6880467be

Changed paths:
  tensorflow/lite/experimental
    examples/lstm
    kernels
    objc/apis
    resource
    ruy
    support/java/src/java/org/tensorflow/lite/support/tensorbuffer
    swift/Sources

@@ -154,7 +154,7 @@ class MnistLstmModel(object):
     """Build the model using the given configs.

     Returns:
-      x: The input placehoder tensor.
+      x: The input placeholder tensor.
       logits: The logits of the output.
       output_class: The prediction.
     """

@@ -163,7 +163,7 @@ class TFLiteLSTMCell(rnn_cell_impl.LayerRNNCell):
   """Long short-term memory unit (LSTM) recurrent network cell.

   This is used only for TfLite, it provides hints and it also makes the
-  variables in the desired for the tflite ops (transposed and seaparated).
+  variables in the desired for the tflite ops (transposed and separated).

   The default non-peephole implementation is based on:

@@ -29,7 +29,7 @@ limitations under the License.

 namespace tflite {

-// Forward declaraction for op kernels.
+// Forward declaration for op kernels.
 namespace ops {
 namespace custom {

@@ -505,7 +505,7 @@ class HashtableGraph {
   TestErrorReporter error_reporter_;
 };

-// HashtableDefaultGraphTest tests hash table feautres on a basic graph, created
+// HashtableDefaultGraphTest tests hash table features on a basic graph, created
 // by the HashtableGraph class.
 template <typename KeyType, typename ValueType>
 class HashtableDefaultGraphTest {

@@ -63,10 +63,10 @@ typedef NS_ENUM(NSUInteger, TFLInterpreterErrorCode) {
   /** Failed to allocate memory for tensors. */
   TFLInterpreterErrorCodeFailedToAllocateTensors,

-  /** Operaton not allowed without allocating memory for tensors first. */
+  /** Operation not allowed without allocating memory for tensors first. */
   TFLInterpreterErrorCodeAllocateTensorsRequired,

-  /** Operaton not allowed without invoking the interpreter first. */
+  /** Operation not allowed without invoking the interpreter first. */
   TFLInterpreterErrorCodeInvokeInterpreterRequired,
 };

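The two corrected comments describe an ordering contract: memory for tensors must be allocated before tensors are used, and the interpreter must be invoked before outputs are read. A minimal sketch of that call order against the TensorFlow Lite C++ interpreter API; the model path and the single float input/output are assumptions for illustration, not part of this commit:

```cpp
// Sketch of the call order the error codes above guard against.
// "model.tflite" and the single float input/output are placeholders.
#include <cstdio>
#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

int main() {
  auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
  if (!model) return 1;
  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  tflite::InterpreterBuilder(*model, resolver)(&interpreter);
  if (!interpreter) return 1;
  // Allocate memory for tensors first; touching tensors before this step is
  // the "allocate tensors required" situation.
  if (interpreter->AllocateTensors() != kTfLiteOk) return 1;
  *interpreter->typed_input_tensor<float>(0) = 1.0f;
  // Invoke before reading outputs; reading them earlier is the
  // "invoke interpreter required" situation.
  if (interpreter->Invoke() != kTfLiteOk) return 1;
  std::printf("output: %f\n", *interpreter->typed_output_tensor<float>(0));
  return 0;
}
```
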
@@ -39,7 +39,7 @@ class TensorReader {
   const T* input_data_;
 };

-/// Helper class for accesing TFLite tensor data. This specialized class is for
+/// Helper class for accessing TFLite tensor data. This specialized class is for
 /// std::string type.
 template <>
 class TensorReader<std::string> {

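The corrected comment points at a template specialization: a generic reader over the element type plus a dedicated version for std::string, whose data cannot be read as a flat array of fixed-size elements. A minimal sketch of that pattern with made-up names, not the actual TFLite declarations:

```cpp
// Illustrative template-specialization pattern only; not the real TensorReader.
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

template <typename T>
class Reader {
 public:
  explicit Reader(const T* data) : data_(data) {}
  T Get(std::size_t i) const { return data_[i]; }  // Plain flat-array access.

 private:
  const T* data_;
};

// Full specialization: string "tensors" need their own access logic.
template <>
class Reader<std::string> {
 public:
  explicit Reader(const std::vector<std::string>* data) : data_(data) {}
  const std::string& Get(std::size_t i) const { return (*data_)[i]; }

 private:
  const std::vector<std::string>* data_;
};

int main() {
  const float floats[] = {1.f, 2.f};
  const std::vector<std::string> strings = {"a", "b"};
  std::cout << Reader<float>(floats).Get(1) << " "
            << Reader<std::string>(&strings).Get(0) << "\n";  // prints "2 a"
  return 0;
}
```
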
@@ -567,7 +567,7 @@ inline void PackFloatAvx2Packer(const float* src_ptr, const float* zerobuf,
   RUY_DCHECK_EQ(PackImplFloatAvx2::Layout::kCols, 8);
   RUY_DCHECK_EQ(PackImplFloatAvx2::Layout::kRows, 1);

-  // This packing amounts to tranposition of 8x8 blocks.
+  // This packing amounts to transposition of 8x8 blocks.
   static constexpr int kPackCols = 8; // Source cols packed together.
   static constexpr int kPackRows = 8; // Short input is padded.

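In scalar terms, the "transposition of 8x8 blocks" mentioned here means reading 8 source columns and writing their elements out row-major, padding with zeros when fewer than 8 rows are available. A rough scalar illustration under those assumptions; this is not the AVX2 kernel and the names are made up:

```cpp
// Scalar illustration of packing by transposing an 8x8 block, with zero
// padding when fewer than 8 rows are available. Not the ruy AVX2 code.
#include <cstdio>

constexpr int kPackCols = 8;  // Source cols packed together.
constexpr int kPackRows = 8;  // Short input is padded.

// src is column-major with `src_stride` floats between columns; rows may be < 8.
void PackBlock8x8(const float* src, int src_stride, int rows, float* packed) {
  for (int r = 0; r < kPackRows; ++r) {
    for (int c = 0; c < kPackCols; ++c) {
      // Element (r, c) of the column-major source block is written row-major:
      // that index swap is the 8x8 transposition. Rows past `rows` are padding.
      packed[r * kPackCols + c] = (r < rows) ? src[c * src_stride + r] : 0.0f;
    }
  }
}

int main() {
  float src[8 * 8];
  for (int i = 0; i < 64; ++i) src[i] = static_cast<float>(i);
  float packed[8 * 8];
  PackBlock8x8(src, /*src_stride=*/8, /*rows=*/5, packed);  // 3 rows padded
  std::printf("%g %g\n", packed[0], packed[1]);  // prints "0 8": row 0 of cols 0,1
  return 0;
}
```
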
@@ -57,7 +57,7 @@ limitations under the License.
 //
 // These are mostly sub-selections of architectures.

-// Detect NEON. Explictly avoid emulation, or anything like it, on x86.
+// Detect NEON. Explicitly avoid emulation, or anything like it, on x86.
 #if (defined(__ARM_NEON) || defined(__ARM_NEON__)) && !RUY_PLATFORM(X86)
 #define RUY_DONOTUSEDIRECTLY_NEON 1
 #else

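The hunk shows the two halves of a detect-then-query macro scheme: detection defines RUY_DONOTUSEDIRECTLY_NEON to 0 or 1, and other code tests the flag only through the function-like RUY_PLATFORM(...) macro, as the !RUY_PLATFORM(X86) condition already does. A self-contained sketch of that scheme, assuming the query macro token-pastes onto the DONOTUSEDIRECTLY prefix; the macros are renamed so it is clearly illustrative rather than ruy's actual header:

```cpp
// Minimal sketch of the detect-then-query macro pattern; simplified, not the
// actual ruy platform header.
#include <cstdio>

// Query macro: the only supported way to test a platform flag.
#define MY_PLATFORM(X) ((MY_DONOTUSEDIRECTLY_##X) != 0)

// Detection: define the flag exactly once, to 0 or 1, never leave it unset.
#if (defined(__ARM_NEON) || defined(__ARM_NEON__))
#define MY_DONOTUSEDIRECTLY_NEON 1
#else
#define MY_DONOTUSEDIRECTLY_NEON 0
#endif

int main() {
#if MY_PLATFORM(NEON)
  std::printf("NEON path\n");
#else
  std::printf("portable path\n");
#endif
  return 0;
}
```
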
@@ -252,7 +252,7 @@ Scalar Parametrized(float param) {
 template <typename Scalar>
 struct RandomRangeBounds<Scalar, false> {
   static Scalar GetMinBound(RandomRange range) {
-    static constexpr double offcentredness =
+    static constexpr double offcenteredness =
         0.02; // Shift lower limit by about 5 for range of 255.
     switch (range) {
       case RandomRange::kGeneral:

@@ -262,8 +262,8 @@ struct RandomRangeBounds<Scalar, false> {
       case RandomRange::kOffCenterAvoidMinValue:
         return 1 + std::numeric_limits<Scalar>::lowest() +
                static_cast<Scalar>(
-                   offcentredness * std::numeric_limits<Scalar>::max() -
-                   offcentredness *
+                   offcenteredness * std::numeric_limits<Scalar>::max() -
+                   offcenteredness *
                        (std::numeric_limits<Scalar>::lowest() + 1));
       case RandomRange::kReasonableSrcZeroPoint:
         return std::numeric_limits<Scalar>::lowest();

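To make "Shift lower limit by about 5 for range of 255" concrete for an int8 Scalar: lowest() is -128 and max() is 127, so the kOffCenterAvoidMinValue bound is 1 + (-128) + int8(0.02 * 127 - 0.02 * (-127)) = -127 + 5 = -122, roughly 5 above lowest() + 1, which avoids the most negative value as the case name suggests. A standalone check of that arithmetic, for illustration only:

```cpp
// Standalone check of the off-centered lower bound for int8; illustration only,
// not part of the ruy sources.
#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  constexpr double offcenteredness = 0.02;
  constexpr double lowest = std::numeric_limits<std::int8_t>::lowest();  // -128
  constexpr double max = std::numeric_limits<std::int8_t>::max();        // 127
  const int bound =
      1 + static_cast<int>(lowest) +
      static_cast<std::int8_t>(offcenteredness * max -
                               offcenteredness * (lowest + 1));
  std::printf("off-center min bound for int8: %d\n", bound);  // prints -122
  return 0;
}
```
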
@@ -129,16 +129,16 @@ class TuningResolver {
   // access to that.
   friend class TuneTool;
   // Actually runs a nano-benchmark, producing a real number called 'ratio'
-  // whose meaning is generally opaque / implemenation defined. Typically,
+  // whose meaning is generally opaque / implementation defined. Typically,
   // this would be the ratio between the latencies of two different
   // pieces of asm code differing only by the ordering of instructions,
   // revealing whether the CPU cares about such ordering details.
-  // An implemenation may just return a dummy value if it is not based on
+  // An implementation may just return a dummy value if it is not based on
   // such nanobenchmarking / ratio evaluation.
   float EvalRatio();
   // Empirically determined threshold on ratio values delineating
   // out-of-order (ratios closer to 1) from in-order (ratios farther from 1).
-  // An implemenation may just return a dummy value if it is not based on
+  // An implementation may just return a dummy value if it is not based on
   // such nanobenchmarking / ratio evaluation.
   float ThresholdRatio();
   // Perform the tuning resolution now. That may typically use EvalRatio and

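The corrected comments outline the resolution logic: benchmark two instruction orderings, treat a ratio near 1 as an out-of-order core, and treat a ratio beyond the empirically chosen threshold as an in-order core that benefits from hand-ordered kernels. A hedged sketch of such a resolution step; the enum, the functions, and the constants below are invented for illustration and are not ruy's actual API:

```cpp
// Illustrative resolution step built on the EvalRatio/ThresholdRatio idea
// described above; names and values are hypothetical, not ruy's.
#include <cstdio>

enum class CoreKind { kOutOfOrder, kInOrder };

// Stand-ins for the nano-benchmark and its empirically determined threshold.
float EvalRatio() { return 1.05f; }      // e.g. measured latency ratio
float ThresholdRatio() { return 1.5f; }  // e.g. empirically chosen cutoff

CoreKind ResolveCoreKind() {
  // Ratios close to 1: the CPU hides instruction-ordering differences, so it
  // is treated as out-of-order. Ratios beyond the threshold: in-order.
  return EvalRatio() < ThresholdRatio() ? CoreKind::kOutOfOrder
                                        : CoreKind::kInOrder;
}

int main() {
  std::printf("%s\n", ResolveCoreKind() == CoreKind::kOutOfOrder
                          ? "out-of-order tuning"
                          : "in-order tuning");
  return 0;
}
```
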
@@ -363,7 +363,7 @@ public abstract class TensorBuffer {
       return true;
     }

-    // This shape refers to a multidimentional array.
+    // This shape refers to a multidimensional array.
     for (int s : shape) {
       // All elements in shape should be non-negative.
       if (s < 0) {

@@ -270,7 +270,7 @@ extension Interpreter {
   }
 }

-/// A type alias for `Interpreter.Options` to support backwards compatiblity with the deprecated
+/// A type alias for `Interpreter.Options` to support backwards compatibility with the deprecated
 /// `InterpreterOptions` struct.
 @available(*, deprecated, renamed: "Interpreter.Options")
 public typealias InterpreterOptions = Interpreter.Options

@@ -44,7 +44,7 @@ extension InterpreterError: LocalizedError {
     case .failedToCreateInterpreter:
       return "Failed to create the interpreter."
     case .failedToResizeInputTensor(let index):
-      return "Failed to resize input tesnor at index \(index)."
+      return "Failed to resize input tensor at index \(index)."
     case .failedToCopyDataToInputTensor:
       return "Failed to copy data to input tensor."
     case .failedToAllocateTensors: