fix: typos using misspell
This PR is part of a campaign to fix a lot of typos on GitHub! You can see the progress at https://github.com/fixTypos/fix_typos/. The typos were found and fixed with https://github.com/client9/misspell.
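For reference, a minimal sketch of how such a pass is typically run (misspell's -w flag writes corrections in place, per its README; the file selection below is purely illustrative):

    # report suspected misspellings without modifying anything
    misspell configure

    # apply corrections in place across a set of source files
    find . -type f \( -name '*.cc' -o -name '*.h' -o -name '*.py' \) | xargs misspell -w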
parent f2a46993e3
commit 1f6ed06221
configure (vendored)
@@ -230,7 +230,7 @@ if [ "$TF_NEED_MKL" == "1" ]; then # TF_NEED_MKL
 if [ -z "$MKL_INSTALL_PATH" ]; then
 MKL_INSTALL_PATH=$default_mkl_path
 fi
-# Result returned from "read" will be used unexpanded. That make "~" unuseable.
+# Result returned from "read" will be used unexpanded. That make "~" unusable.
 # Going through one more level of expansion to handle that.
 MKL_INSTALL_PATH=`${PYTHON_BIN_PATH} -c "import os; print(os.path.realpath(os.path.expanduser('${MKL_INSTALL_PATH}')))"`
 fi
@@ -565,7 +565,7 @@ while true; do
 if [ -z "$CUDNN_INSTALL_PATH" ]; then
 CUDNN_INSTALL_PATH=$default_cudnn_path
 fi
-# Result returned from "read" will be used unexpanded. That make "~" unuseable.
+# Result returned from "read" will be used unexpanded. That make "~" unusable.
 # Going through one more level of expansion to handle that.
 CUDNN_INSTALL_PATH=`"${PYTHON_BIN_PATH}" -c "import os; print(os.path.realpath(os.path.expanduser('${CUDNN_INSTALL_PATH}')))"`
 fi
@@ -65,7 +65,7 @@ class Array4D {
 Fill(T());
 }
 
-// Creates a 4D array, initalized to value.
+// Creates a 4D array, initialized to value.
 Array4D(int64 planes, int64 depth, int64 height, int64 width, T value)
 : Array4D(planes, depth, height, width) {
 Fill(value);
@@ -56,7 +56,7 @@ class ExecutableBuildOptions {
 
 // If set, this specifies the layout of the result of the computation. If not
 // set, the service will chose the layout of the result. A Shape is used to
-// store the layout to accomodate tuple result shapes. A value of nullptr
+// store the layout to accommodate tuple result shapes. A value of nullptr
 // indicates the option has not been set.
 ExecutableBuildOptions& set_result_layout(const Shape& shape_with_layout);
 const Shape* result_layout() const;
@@ -128,7 +128,7 @@ class LiteralUtil {
 
 // Creates a new value that has the equivalent value as literal, but conforms
 // to new_layout; e.g. a literal matrix that was in {0, 1} minor-to-major
-// dimension layout can be re-layed-out as {1, 0} minor-to-major dimension
+// dimension layout can be re-laid-out as {1, 0} minor-to-major dimension
 // layout and the value in the cell at any given logical index (i0, i1) will
 // be the same.
 //
@@ -628,7 +628,7 @@ class FusedDynamicUpdateSliceLivenessTest : public BufferLivenessTest {
 BufferLiveness::Run(module.get(),
 MakeUnique<DependencyHloOrdering>(module.get()))
 .ConsumeValueOrDie();
-// Return whether or not buffers interfernce is detected between
+// Return whether or not buffers interference is detected between
 // 'tuple_param0' and 'tuple_root' at shape index '{1}'.
 return TupleElementsMayInterfere(*liveness, tuple_param0, tuple_root, {1});
 }
@@ -740,7 +740,7 @@ class DynamicUpdateSliceLivenessTest : public BufferLivenessTest {
 BufferLiveness::Run(module.get(),
 MakeUnique<DependencyHloOrdering>(module.get()))
 .ConsumeValueOrDie();
-// Return whether or not buffers interfernce is detected between
+// Return whether or not buffers interference is detected between
 // 'tuple_param0' and 'tuple_root' at shape index '{1}'.
 return TupleElementsMayInterfere(*liveness, tuple_param0, tuple_root, {1});
 }
@@ -141,7 +141,7 @@ class InstructionCopier {
 Status RecordAmbiguousOrNonDistinctIndices(
 const TuplePointsToAnalysis& points_to_analysis);
 
-// Records instruction buffer indices which have interferring live ranges
+// Records instruction buffer indices which have interfering live ranges
 // with 'other_instruction' buffers at same index.
 Status RecordIndicesWhichInterfereWithOtherInstruction(
 const BufferLiveness& liveness, const HloInstruction* other_instruction,
@@ -431,7 +431,7 @@ HloInstruction* InstructionCopier::Copy() {
 return copy;
 }
 
-// The 'read_only_indices' are initalized based on points-to analysis on the
+// The 'read_only_indices' are initialized based on points-to analysis on the
 // while body corresponding to 'while_hlo'. If the init buffer corresponding to
 // a read-only index aliases with an entry parameter (or constant), it cannot be
 // considered read-only, and must be copied. This is necessary because some
@@ -972,7 +972,7 @@ TEST_F(WhileCopyInsertionTest, InitPointsToNonDistinct) {
 op::Copy(old_init->operand(1)->operand(0)))));
 }
 
-// Tests while init instruction buffer which interfers with while result buffer.
+// Tests while init instruction buffer which interferes with while result buffer.
 //
 // init_data = Broadcast(...)
 // add_unrelated = Add(init_data) // takes a reference to cause interference
@@ -125,7 +125,7 @@ tensorflow::Status ConvolutionThunk::ExecuteOnStream(
 CHECK_LE(num_dimensions, 3);
 // cuDNN does not support 1D convolutions. We therefore express 1D
 // convolutions as 2D convolutions where the first spatial dimension is 1.
-// This matches the behaviour of TF (see definition of conv1d in
+// This matches the behavior of TF (see definition of conv1d in
 // tensorflow/python/ops/nn_ops.py).
 const int effective_num_dimensions = std::max(2, num_dimensions);
 
@@ -405,9 +405,9 @@ StatusOr<string> CompileModuleToPtx(llvm::Module* module,
 
 AddOptimizationPasses(flags->opt_level, /*size_level=*/0,
 target_machine.get(), &module_passes, &function_passes);
-// Loop unrolling exposes more opportunites for SROA. Therefore, we run SROA
+// Loop unrolling exposes more opportunities for SROA. Therefore, we run SROA
 // again after the standard optimization passes [http://b/13329423].
-// TODO(jingyue): SROA may further expose more optimization opportunites, such
+// TODO(jingyue): SROA may further expose more optimization opportunities, such
 // as more precise alias analysis and more function inlining (SROA may change
 // the inlining cost of a function). For now, running SROA already emits good
 // enough code for the evaluated benchmarks. We may want to run more
@@ -33,7 +33,7 @@ namespace gpu {
 enum class PartitionStrategy {
 // Optimized for latency by allowing maximum number of registers per thread.
 kLatency,
-// Optimized for throughtput. This may limit registers per thread and cause
+// Optimized for throughput. This may limit registers per thread and cause
 // longer latency.
 kThroughput
 };
@@ -37,7 +37,7 @@ namespace {
 // patterns to match.
 //
 // Each ExprTree node is comprised of an HloOpcode, and a set of operands (each
-// of type ExprTree). Operands can be added by specifing the index and HloOpcode
+// of type ExprTree). Operands can be added by specifying the index and HloOpcode
 // of the operand.
 //
 // For example, the following computation:
@@ -21,7 +21,7 @@ limitations under the License.
 
 namespace xla {
 
-// A pass which performs constant folding in order to avoid unecessary
+// A pass which performs constant folding in order to avoid unnecessary
 // computation on constants.
 class HloConstantFolding : public HloPassInterface {
 public:
@@ -133,7 +133,7 @@ class HloCostAnalysis : public DfsHloVisitor {
 int64 bytes_accessed() const { return bytes_accessed_; }
 
 private:
-// An FMA counts as two floating point operations in these analyses.
+// An FMA counts as two floating point operations in these analyzes.
 static constexpr int64 kFmaFlops = 2;
 
 // Utility function to handle all element-wise operations.
@@ -104,7 +104,7 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
 std::hash<int>>
 typed_visitors_;
 
-// Tracks the HLO instruciton and its evaluated literal result.
+// Tracks the HLO instruction and its evaluated literal result.
 // TODO(b/35950897): have better memory management here to free instructions
 // that are no longer a parent for any other subsequent instruction in
 // post-orderring.
@@ -343,7 +343,7 @@ class ListScheduler {
 return freed_bytes;
 }
 
-// Construct the scheduling priority of the given instruciton.
+// Construct the scheduling priority of the given instruction.
 Priority GetPriority(const HloInstruction* instruction) {
 return {BytesFreedIfScheduled(instruction), instruction->user_count()};
 }
@@ -273,7 +273,7 @@ class LayoutAssignment : public HloPassInterface {
 return Status::OK();
 }
 
-// This method can be overriden to mark instructions as requiring the operands
+// This method can be overridden to mark instructions as requiring the operands
 // to have the same layout as the result, for performance or correctness. This
 // will propagate constraints through the instruction from the result into the
 // operands.
@@ -130,7 +130,7 @@ llvm::AllocaInst* EmitAllocaAtFunctionEntryWithCount(
 llvm::Type* type, llvm::Value* element_count, tensorflow::StringPiece name,
 llvm::IRBuilder<>* ir_builder, int alignment = 0);
 
-// Creates a basic block with the same context and funtion as for the
+// Creates a basic block with the same context and function as for the
 // builder. Inserts at the end of the function if insert_before is
 // null.
 llvm::BasicBlock* CreateBasicBlock(llvm::BasicBlock* insert_before,
@@ -65,7 +65,7 @@ class HloTestBase : public ::testing::Test {
 perftools::gputools::DeviceMemoryBase TransferToDevice(
 const Literal& literal);
 
-// Transfers the array refered to by the given handle from the device and
+// Transfers the array referred to by the given handle from the device and
 // returns as a Literal.
 std::unique_ptr<Literal> TransferFromDevice(
 const Shape& shape, perftools::gputools::DeviceMemoryBase device_base);
@@ -194,7 +194,7 @@ XLA_TEST_F(PrngTest, MapUsingRng) {
 }
 }
 
-// This tests demonstrates the global seeding behaviour.
+// This tests demonstrates the global seeding behavior.
 // * If a seed is passed in via Execute (ExecuteAndTransfer) then the output is
 // fixed (i.e., there is a single output for a given seed);
 // * If no seed is passed in then the output of every call can be different;
@@ -177,7 +177,7 @@ def _logspace_mean(log_values):
 `Log[Mean[values]]`.
 """
 # center = Max[Log[values]], with stop-gradient
-# The center hopefully keep the exponentiated term small. It is cancelled
+# The center hopefully keep the exponentiated term small. It is canceled
 # from the final result, so putting stop gradient on it will not change the
 # final result. We put stop gradient on to eliminate unnecessary computation.
 center = array_ops.stop_gradient(_sample_max(log_values))
@@ -42,7 +42,7 @@ class RandomTreeGen {
 boosted_trees::trees::DecisionTreeConfig Generate(
 const boosted_trees::trees::DecisionTreeConfig& tree);
 
-// Requried: depth >= 1; tree_count >= 1.
+// Required: depth >= 1; tree_count >= 1.
 boosted_trees::trees::DecisionTreeEnsembleConfig GenerateEnsemble(
 int dept, int tree_count);
 
@@ -46,7 +46,7 @@ Status GetTableAttrs(OpKernelConstruction* context, string* project_id,
 
 } // namespace
 
-// Note that overriden methods with names ending in "Locked" are called by
+// Note that overridden methods with names ending in "Locked" are called by
 // ReaderBase while a mutex is held.
 // See comments for ReaderBase.
 class BigQueryReader : public ReaderBase {
@@ -46,7 +46,7 @@ _TABLE = "test-table"
 # The values for rows are generated such that some columns have null values. The
 # general formula here is:
 # - The int64 column is present in every row.
-# - The string column is only avaiable in even rows.
+# - The string column is only available in even rows.
 # - The float column is only available in every third row.
 _ROWS = [[0, "s_0", 0.1], [1, None, None], [2, "s_2", None], [3, None, 3.1],
 [4, "s_4", None], [5, None, None], [6, "s_6", 6.1], [7, None, None],
@@ -141,7 +141,7 @@ _cudnn_rnn_common_doc_string = """
 * Once a while, the user saves the parameter buffer into model checkpoints
 with Saver.save().
 * When restoring, the user creates a RNNParamsSaveable object and uses
-Saver.restore() to restore the paramter buffer from the canonical format
+Saver.restore() to restore the parameter buffer from the canonical format
 to a user-defined format, as well as to restore other savable objects
 in the checkpoint file.
 """
@@ -39,7 +39,7 @@ class _ExperimentalFuncGraph(function._FuncGraph):
 _ExperimentalFuncGraph overrides ops.Graph's create_op() so that we can keep
 track of every inputs into every op created inside the function. If
 any input is from other graphs, we keep track of it in self.capture
-and substitue the input with a place holder.
+and substitute the input with a place holder.
 
 Each captured input's corresponding place holder is converted into a
 function argument and the caller passes in the captured tensor.
@@ -38,7 +38,7 @@ class _FakeVectorStudentT(object):
 
 Other `Vector*` implementations need only test new code. That we don't need
 to test every Vector* distribution is good because there aren't SciPy
-analogues and reimplementing everything in NumPy sort of defeats the point of
+analogs and reimplementing everything in NumPy sort of defeats the point of
 having the `TransformedDistribution + Affine` API.
 """
 
@@ -269,7 +269,7 @@ class Binomial(distribution.Distribution):
 message="total_count must be non-negative."),
 distribution_util.assert_integer_form(
 total_count,
-message="total_count cannot contain fractional componentes."),
+message="total_count cannot contain fractional components."),
 ], total_count)
 
 def _maybe_assert_valid_sample(self, counts, check_integer=True):
@@ -130,7 +130,7 @@ def transform_tree(tree, fn, iterable_type=tuple):
 tree: iterable or not. If iterable, its elements (child) can also be
 iterable or not.
 fn: function to apply to each leaves.
-iterable_type: type use to construct the resulting tree for unknwon
+iterable_type: type use to construct the resulting tree for unknown
 iterable, typically `list` or `tuple`.
 Returns:
 A tree whose leaves has been transformed by `fn`.
@@ -5,7 +5,7 @@ of `SessionRunHook` and are to be used with helpers like `MonitoredSession`
 and `learn.Estimator` that wrap `tensorflow.Session`.
 
 The hooks are called between invocations of `Session.run()` to perform custom
-behaviour.
+behavior.
 
 For example the `ProfilerHook` periodically collects `RunMetadata` after
 `Session.run()` and saves profiling information that can be viewed in a
@@ -77,7 +77,7 @@ REGISTER_OP("BipartiteMatch")
 .Doc(R"doc(
 Find bipartite matching based on a given distance matrix.
 
-A greedy bi-partite matching alogrithm is used to obtain the matching with the
+A greedy bi-partite matching algorithm is used to obtain the matching with the
 (greedy) minimum distance.
 
 distance_mat: A 2-D float tensor of shape `[num_rows, num_columns]`. It is a
@@ -266,7 +266,7 @@ def bipartite_match(
 top_k=-1):
 """Find bipartite matching based on a given distance matrix.
 
-A greedy bi-partite matching alogrithm is used to obtain the matching with
+A greedy bi-partite matching algorithm is used to obtain the matching with
 the (greedy) minimum distance.
 
 Args:
@@ -59,7 +59,7 @@ def identity_block(input_tensor, kernel_size, filters, stage, block):
 
 Arguments:
 input_tensor: input tensor
-kernel_size: defualt 3, the kernel size of middle conv layer at main path
+kernel_size: default 3, the kernel size of middle conv layer at main path
 filters: list of integers, the filterss of 3 conv layer at main path
 stage: integer, current stage label, used for generating layer names
 block: 'a','b'..., current block label, used for generating layer names
@@ -98,7 +98,7 @@ def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2,
 
 Arguments:
 input_tensor: input tensor
-kernel_size: defualt 3, the kernel size of middle conv layer at main path
+kernel_size: default 3, the kernel size of middle conv layer at main path
 filters: list of integers, the filterss of 3 conv layer at main path
 stage: integer, current stage label, used for generating layer names
 block: 'a','b'..., current block label, used for generating layer names
@@ -91,7 +91,7 @@ _IMAGE_DATA_FORMAT = 'channels_last'
 def backend():
 """Publicly accessible method for determining the current backend.
 
-Only exists for API compatibily with multi-backend Keras.
+Only exists for API compatibility with multi-backend Keras.
 
 Returns:
 The string "tensorflow".
@@ -2617,7 +2617,7 @@ def in_train_phase(x, alt, training=None):
 (tensor or callable that returns a tensor).
 training: Optional scalar tensor
 (or Python boolean, or Python integer)
-specifing the learning phase.
+specifying the learning phase.
 
 Returns:
 Either `x` or `alt` based on the `training` flag.
@@ -2660,7 +2660,7 @@ def in_test_phase(x, alt, training=None):
 (tensor or callable that returns a tensor).
 training: Optional scalar tensor
 (or Python boolean, or Python integer)
-specifing the learning phase.
+specifying the learning phase.
 
 Returns:
 Either `x` or `alt` based on `K.learning_phase`.
@@ -1546,7 +1546,7 @@ class Container(Layer):
 """Retrieve the model's updates.
 
 Will only include updates that are either
-inconditional, or conditional on inputs to this model
+unconditional, or conditional on inputs to this model
 (e.g. will not include updates that depend on tensors
 that aren't inputs to this model).
 
@@ -1573,7 +1573,7 @@ class Container(Layer):
 """Retrieve the model's losses.
 
 Will only include losses that are either
-inconditional, or conditional on inputs to this model
+unconditional, or conditional on inputs to this model
 (e.g. will not include losses that depend on tensors
 that aren't inputs to this model).
 
@@ -109,7 +109,7 @@ class BaseWrapper(object):
 """Gets parameters for this estimator.
 
 Arguments:
-**params: ignored (exists for API compatiblity).
+**params: ignored (exists for API compatibility).
 
 Returns:
 Dictionary of parameter names mapped to their values.
@@ -85,7 +85,7 @@ class RandomFourierFeatureMapperTest(TensorFlowTestCase):
 mapped_x = rffm.map(x)
 mapped_x_copy = rffm.map(x)
 # Two different evaluations of tensors output by map on the same input
-# are identical because the same paramaters are used for the mappings.
+# are identical because the same parameters are used for the mappings.
 self.assertAllClose(mapped_x.eval(), mapped_x_copy.eval(), atol=0.001)
 
 def testTwoMapperObjects(self):
@@ -618,7 +618,7 @@ def identity(labeled_tensor, name=None):
 def slice_function(labeled_tensor, selection, name=None):
 """Slice out a subset of the tensor.
 
-This is an analogue of tf.slice.
+This is an analog of tf.slice.
 For example:
 >>> tensor = tf.reshape(tf.range(0, 6), [3, 2])
 >>> labeled_tensor = lt.LabeledTensor(tensor, ['a', ('b', ['foo', 'bar'])])
@@ -28,7 +28,7 @@ from tensorflow.python.platform import test
 
 class RegressionTargetColumnTest(test.TestCase):
 
-# TODO(zakaria): test multilabel regresssion.
+# TODO(zakaria): test multilabel regression.
 def testRegression(self):
 target_column = target_column_lib.regression_target()
 with ops.Graph().as_default(), session.Session() as sess:
@@ -97,7 +97,7 @@ class TensorFlowDataFrame(df.DataFrame):
 graph: the `Graph` in which the `DataFrame` should be built.
 session: the `Session` in which to run the columns of the `DataFrame`.
 start_queues: if true, queues will be started before running and halted
-after producting `n` batches.
+after producing `n` batches.
 initialize_variables: if true, variables will be initialized.
 **kwargs: Additional keyword arguments e.g. `num_epochs`.
 
@@ -89,7 +89,7 @@ SCIKIT_DECOUPLE_INSTRUCTIONS = (
 
 
 def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
-"""Verifies validity of co-existance of input arguments."""
+"""Verifies validity of co-existence of input arguments."""
 if input_fn is None:
 if x is None:
 raise ValueError('Either x or input_fn must be provided.')
@@ -358,7 +358,7 @@ class BaseEstimator(
 """
 __metaclass__ = abc.ABCMeta
 
-# Note that for Google users, this is overriden with
+# Note that for Google users, this is overridden with
 # learn_runner.EstimatorConfig.
 # TODO(wicke): Remove this once launcher takes over config functionality
 _Config = run_config.RunConfig # pylint: disable=invalid-name
@@ -703,7 +703,7 @@ class BaseEstimator(
 def _get_eval_ops(self, features, labels, metrics):
 """Method that builds model graph and returns evaluation ops.
 
-Expected to be overriden by sub-classes that require custom support.
+Expected to be overridden by sub-classes that require custom support.
 
 Args:
 features: `Tensor` or `dict` of `Tensor` objects.
@@ -1149,7 +1149,7 @@ class Estimator(BaseEstimator):
 def _get_train_ops(self, features, labels):
 """Method that builds model graph and returns trainer ops.
 
-Expected to be overriden by sub-classes that require custom support.
+Expected to be overridden by sub-classes that require custom support.
 This implementation uses `model_fn` passed as parameter to constructor to
 build model.
 
@@ -1165,7 +1165,7 @@ class Estimator(BaseEstimator):
 def _get_eval_ops(self, features, labels, metrics):
 """Method that builds model graph and returns evaluation ops.
 
-Expected to be overriden by sub-classes that require custom support.
+Expected to be overridden by sub-classes that require custom support.
 This implementation uses `model_fn` passed as parameter to constructor to
 build model.
 
@@ -1204,7 +1204,7 @@ class Estimator(BaseEstimator):
 def _get_predict_ops(self, features):
 """Method that builds model graph and returns prediction ops.
 
-Expected to be overriden by sub-classes that require custom support.
+Expected to be overridden by sub-classes that require custom support.
 This implementation uses `model_fn` passed as parameter to constructor to
 build model.
 
@@ -620,7 +620,7 @@ def _create_model_fn_ops(features,
 weight_tensor = _weight_tensor(features, weight_column_name)
 loss, weighted_average_loss = loss_fn(labels, logits, weight_tensor)
 # Uses the deprecated API to set the tag explicitly.
-# Without it, trianing and eval losses will show up in different graphs.
+# Without it, training and eval losses will show up in different graphs.
 logging_ops.scalar_summary(
 _summary_key(head_name, mkey.LOSS), weighted_average_loss)
 
@@ -1141,7 +1141,7 @@ def _to_labels_tensor(labels, label_name):
 """Returns label as a tensor.
 
 Args:
-labels: Label `Tensor` or `SparseTensor` or a dict containig labels.
+labels: Label `Tensor` or `SparseTensor` or a dict containing labels.
 label_name: Label name if labels is a dict.
 
 Returns:
@@ -1575,7 +1575,7 @@ class _MultiHead(Head):
 Args:
 all_model_fn_ops: list of ModelFnOps for the individual heads.
 train_op_fn: Function to create train op. See `create_model_fn_ops`
-documentaion for more details.
+documentation for more details.
 
 Returns:
 ModelFnOps that merges all heads for TRAIN.
@@ -119,7 +119,7 @@ def apply_dropout(cells, dropout_keep_probabilities, random_seed=None):
 """
 if len(dropout_keep_probabilities) != len(cells) + 1:
 raise ValueError(
-'The number of dropout probabilites must be one greater than the '
+'The number of dropout probabilities must be one greater than the '
 'number of cells. Got {} cells and {} dropout probabilities.'.format(
 len(cells), len(dropout_keep_probabilities)))
 wrapped_cells = [
@@ -309,7 +309,7 @@ class RunConfig(ClusterConfig, core_run_config.RunConfig):
 Args:
 whitelist: A list of the string names of the properties uid should not
 include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
-includes most properites user allowes to change.
+includes most properties user allowes to change.
 
 Returns:
 A uid string.
@@ -53,7 +53,7 @@ class Experiment(object):
 """
 
 # TODO(ispir): remove delay_workers_by_global_step and make global step based
-# waiting as only behaviour.
+# waiting as only behavior.
 @deprecated_args(
 "2016-10-23",
 "local_eval_frequency is deprecated as local_run will be renamed to "
@@ -550,7 +550,7 @@ class Experiment(object):
 eval_result = None
 
 # Set the default value for train_steps_per_iteration, which will be
-# overriden by other settings.
+# overridden by other settings.
 train_steps_per_iteration = 1000
 if self._train_steps_per_iteration is not None:
 train_steps_per_iteration = self._train_steps_per_iteration
@@ -155,7 +155,7 @@ def run(experiment_fn, output_dir=None, schedule=None, run_config=None,
 to create the `Estimator` (passed as `model_dir` to its constructor). It
 must return an `Experiment`. For this case, `run_config` and `hparams`
 must be None.
-2) It accpets two arguments `run_config` and `hparams`, which should be
+2) It accepts two arguments `run_config` and `hparams`, which should be
 used to create the `Estimator` (`run_config` passed as `config` to its
 constructor; `hparams` used as the hyper-paremeters of the model).
 It must return an `Experiment`. For this case, `output_dir` must be None.
@@ -140,7 +140,7 @@ def rnn_seq2seq(encoder_inputs,
 scope: Scope to use, if None new will be produced.
 
 Returns:
-List of tensors for outputs and states for trianing and sampling sub-graphs.
+List of tensors for outputs and states for training and sampling sub-graphs.
 """
 with vs.variable_scope(scope or "rnn_seq2seq"):
 _, last_enc_state = rnn.static_rnn(
@@ -128,9 +128,9 @@ class CategoricalVocabulary(object):
 Class name.
 
 Raises:
-ValueError: if this vocabulary wasn't initalized with support_reverse.
+ValueError: if this vocabulary wasn't initialized with support_reverse.
 """
 if not self._support_reverse:
-raise ValueError("This vocabulary wasn't initalized with "
+raise ValueError("This vocabulary wasn't initialized with "
 "support_reverse to support reverse() function.")
 return self._reverse_mapping[class_id]
@@ -49,7 +49,7 @@ class Trainable(object):
 steps: Number of steps for which to train model. If `None`, train forever.
 'steps' works incrementally. If you call two times fit(steps=10) then
 training occurs in total 20 steps. If you don't want to have incremental
-behaviour please set `max_steps` instead. If set, `max_steps` must be
+behavior please set `max_steps` instead. If set, `max_steps` must be
 `None`.
 batch_size: minibatch size to use on the input, defaults to first
 dimension of `x`. Must be `None` if `input_fn` is provided.
@@ -89,7 +89,7 @@ def _export_graph(graph, saver, checkpoint_path, export_dir,
 def generic_signature_fn(examples, unused_features, predictions):
 """Creates generic signature from given examples and predictions.
 
-This is needed for backward compatibility with default behaviour of
+This is needed for backward compatibility with default behavior of
 export_estimator.
 
 Args:
@@ -309,7 +309,7 @@ def get_most_recent_export(export_dir_base):
 directories.
 
 Returns:
-A gc.Path, whith is just a namedtuple of (path, export_version).
+A gc.Path, with is just a namedtuple of (path, export_version).
 """
 select_filter = gc.largest_export_versions(1)
 results = select_filter(gc.get_paths(export_dir_base,
@@ -109,7 +109,7 @@ class SavedModelExportUtilsTest(test.TestCase):
 self.assertEqual(actual_signature_def, expected_signature_def)
 
 def test_build_standardized_signature_def_classification2(self):
-"""Tests multiple output tensors that include classes and probabilites."""
+"""Tests multiple output tensors that include classes and probabilities."""
 input_tensors = {
 "input-1":
 array_ops.placeholder(
@@ -837,7 +837,7 @@ class Seq2SeqTest(test.TestCase):
 # with variable_scope.variable_scope("new"):
 # _, losses2 = SampleGRUSeq2Seq
 # inp, out, weights, per_example_loss=True)
-# # First loss is scalar, the second one is a 1-dimensinal tensor.
+# # First loss is scalar, the second one is a 1-dimensional tensor.
 # self.assertEqual([], losses1[0].get_shape().as_list())
 # self.assertEqual([None], losses2[0].get_shape().as_list())
 
@@ -49,7 +49,7 @@ class MemoryStatsOpsTest(test_util.TensorFlowTestCase):
 # The memory for matrix "a" can be reused for matrix "d". Therefore, this
 # computation needs space for only three matrix plus some small overhead.
 def testChainOfMatmul(self):
-# MaxBytesInUse is registerd on GPU only. See kernels/memory_stats_ops.cc.
+# MaxBytesInUse is registered on GPU only. See kernels/memory_stats_ops.cc.
 if not test.is_gpu_available():
 return
 
@@ -1507,7 +1507,7 @@ class StreamingAUCTest(test.TestCase):
 self.assertAlmostEqual(1, auc.eval(), 6)
 
 def np_auc(self, predictions, labels, weights):
-"""Computes the AUC explicitely using Numpy.
+"""Computes the AUC explicitly using Numpy.
 
 Args:
 predictions: an ndarray with shape [N].
@@ -466,7 +466,7 @@ class GridLSTMCell(core_rnn_cell.RNNCell):
 state is clipped by this value prior to the cell output activation.
 initializer: (optional) The initializer to use for the weight and
 projection matrices, default None.
-num_unit_shards: (optional) int, defualt 1, How to split the weight
+num_unit_shards: (optional) int, default 1, How to split the weight
 matrix. If > 1,the weight matrix is stored across num_unit_shards.
 forget_bias: (optional) float, default 1.0, The initial bias of the
 forget gates, used to reduce the scale of forgetting at the beginning
@@ -1809,12 +1809,12 @@ class PhasedLSTMCell(core_rnn_cell.RNNCell):
 period during which the gates are open.
 trainable_ratio_on: bool, weather ratio_on is trainable.
 period_init_min: float or scalar float Tensor. With value > 0.
-Minimum value of the initalized period.
+Minimum value of the initialized period.
 The period values are initialized by drawing from the distribution:
 e^U(log(period_init_min), log(period_init_max))
 Where U(.,.) is the uniform distribution.
 period_init_max: float or scalar float Tensor.
-With value > period_init_min. Maximum value of the initalized period.
+With value > period_init_min. Maximum value of the initialized period.
 reuse: (optional) Python boolean describing whether to reuse variables
 in an existing scope. If not `True`, and the existing scope already has
 the given variables, an error is raised.
@@ -474,7 +474,7 @@ class AttentionWrapperState(
 
 Returns:
 A new `AttentionWrapperState` whose properties are the same as
-this one, except any overriden properties as provided in `kwargs`.
+this one, except any overridden properties as provided in `kwargs`.
 """
 return super(AttentionWrapperState, self)._replace(**kwargs)
 
@@ -352,7 +352,7 @@ we can both ensure that each layer uses the same values and simplify the code:
 ```
 
 As the example illustrates, the use of arg_scope makes the code cleaner,
-simpler and easier to maintain. Notice that while argument values are specifed
+simpler and easier to maintain. Notice that while argument values are specified
 in the arg_scope, they can be overwritten locally. In particular, while
 the padding argument has been set to 'SAME', the second convolution overrides
 it with the value of 'VALID'.
@@ -33,7 +33,7 @@ To read data using multiple readers simultaneous with shuffling:
 shuffle=True)
 images, labels = pascal_voc_data_provider.get(['images', 'labels'])
 
-Equivalently, one may request different fields of the same sample seperately:
+Equivalently, one may request different fields of the same sample separately:
 
 [images] = pascal_voc_data_provider.get(['images'])
 [labels] = pascal_voc_data_provider.get(['labels'])
@@ -40,7 +40,7 @@ def visualize_embeddings(summary_writer, config):
 """Stores a config file used by the embedding projector.
 
 Args:
-summary_writer: The summary writer used for writting events.
+summary_writer: The summary writer used for writing events.
 config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`
 proto that holds the configuration for the projector such as paths to
 checkpoint files and metadata files for the embeddings. If
@@ -46,7 +46,7 @@ class ProjectorApiTest(test.TestCase):
 writer = writer_lib.FileWriter(temp_dir)
 projector.visualize_embeddings(writer, config)
 
-# Read the configuratin from disk and make sure it matches the original.
+# Read the configurations from disk and make sure it matches the original.
 with gfile.GFile(os.path.join(temp_dir, 'projector_config.pbtxt')) as f:
 config2 = projector_config_pb2.ProjectorConfig()
 text_format.Parse(f.read(), config2)
@@ -370,7 +370,7 @@ def evaluate_repeatedly(checkpoint_dir,
 
 One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
 summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
-summaries run immedietly after the model checkpoint has been restored.
+summaries run immediately after the model checkpoint has been restored.
 
 Note that `evaluate_once` creates a local variable used to track the number of
 evaluations run via `tf.contrib.training.get_or_create_eval_step`.
@@ -422,7 +422,7 @@ class HParams(object):
 elif issubclass(param_type, float):
 typename = 'float'
 else:
-raise ValueError('Unsupported paramter type: %s' % str(param_type))
+raise ValueError('Unsupported parameter type: %s' % str(param_type))
 
 suffix = 'list' if is_list else 'value'
 return '_'.join([typename, suffix])
@@ -344,7 +344,7 @@ def _prepare_sequence_inputs(inputs, states):
 key = _check_rank(inputs.key, 0)
 
 if length.dtype != dtypes.int32:
-raise TypeError("length dtype must be int32, but recieved: %s" %
+raise TypeError("length dtype must be int32, but received: %s" %
 length.dtype)
 if key.dtype != dtypes.string:
 raise TypeError("key dtype must be string, but received: %s" % key.dtype)
@ -1673,7 +1673,7 @@ def _move_sparse_tensor_out_context(input_context, input_sequences, num_unroll):
     shape = array_ops.concat(
         [array_ops.expand_dims(value_length, 0), sp_tensor.dense_shape], 0)

-    # Construct new indices by mutliplying old ones and prepending [0, n).
+    # Construct new indices by multiplying old ones and prepending [0, n).
     # First multiply indices n times along a newly created 0-dimension.
     multiplied_indices = array_ops.tile(
         array_ops.expand_dims(sp_tensor.indices, 0),

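A numpy sketch of the index construction this comment describes: tile the original indices n times along a new leading dimension, then prepend the unroll position [0, n) as a new first coordinate (shapes and values are illustrative):

    import numpy as np

    n = 3                                 # num_unroll
    indices = np.array([[0, 1], [2, 0]])  # original indices, shape [nnz, rank]

    # Tile indices n times along a newly created 0-dimension: [n, nnz, rank].
    multiplied = np.tile(indices[np.newaxis, :, :], [n, 1, 1])

    # Prepend [0, n) so the rows from unroll step i carry leading index i.
    lead = np.repeat(np.arange(n), indices.shape[0]).reshape(n, -1, 1)
    new_indices = np.concatenate([lead, multiplied], axis=2).reshape(-1, 3)
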
@ -83,7 +83,7 @@ bool IsConstantFoldable(const Node* n,
 }

 // Returns the constant foldable nodes in `nodes` in topological order.
-// Populates `constant_control_deps` with the non-constant control depedencies
+// Populates `constant_control_deps` with the non-constant control dependencies
 // of each constant node.
 void FindConstantFoldableNodes(
     const Graph* graph, ConstantFoldingOptions opts, std::vector<Node*>* nodes,

@ -74,8 +74,8 @@ class Executor {
   //
   // RunAsync() uses "cancellation_manager", if not nullptr, to
   // register callbacks that should be called if the graph computation
-  // is cancelled. Note that the callbacks merely unblock any
-  // long-running computation, and a cancelled step will terminate by
+  // is canceled. Note that the callbacks merely unblock any
+  // long-running computation, and a canceled step will terminate by
   // returning/calling the DoneCallback as usual.
   //
   // RunAsync() dispatches closures to "runner". Typically, "runner"

@ -47,7 +47,7 @@ class SessionFactory {
   // Old sessions may continue to have side-effects on resources not in
   // containers listed in "containers", and thus may affect future
   // sessions' results in ways that are hard to predict. Thus, if well-defined
-  // behaviour is desired, is it recommended that all containers be listed in
+  // behavior is desired, it is recommended that all containers be listed in
   // "containers".
   //
   // If the "containers" vector is empty, the default container is assumed.

@ -243,7 +243,7 @@ Status SimpleGraphExecutionState::InitBaseGraph(
       session_options_->config.graph_options().rewrite_options();

   if (grappler::MetaOptimizerEnabled(rewrite_options)) {
-    // Adding this functionalty in steps. The first step is to make sure
+    // Adding this functionality in steps. The first step is to make sure
     // we don't break dependencies. The second step will be to turn the
     // functionality on by default.
     grappler::GrapplerItem item;

@ -660,7 +660,7 @@ Status SimplePlacer::Run() {
     if (!edge->IsControlEdge() &&
         (IsRefType(node->input_type(edge->dst_input())) ||
          node->input_type(edge->dst_input()) == DT_RESOURCE)) {
-      // If both the source node and this node have paritally
+      // If both the source node and this node have partially
       // specified a device, then 'node's device should be
       // cleared: the reference edge forces 'node' to be on the
       // same device as the source node.

@ -20,7 +20,7 @@ package tensorflow;
 import "tensorflow/core/util/event.proto";

 // Reply message from EventListener to the client, i.e., to the source of the
-// Event protocal buffers, e.g., debug ops inserted by a debugged runtime to a
+// Event protocol buffers, e.g., debug ops inserted by a debugged runtime to a
 // TensorFlow graph being executed.
 message EventReply {
   message DebugOpStateChange {

@ -108,9 +108,9 @@ class GraphMgr {
   };

   struct Item : public core::RefCounted {
-    // TOOD(zhifengc): Keeps a copy of the original graph if the need arises.
-    // TOOD(zhifengc): Stats, updated by multiple runs potentially.
-    // TOOD(zhifengc): Dup-detection. Ensure step_id only run once.
+    // TODO(zhifengc): Keeps a copy of the original graph if the need arises.
+    // TODO(zhifengc): Stats, updated by multiple runs potentially.
+    // TODO(zhifengc): Dup-detection. Ensure step_id only run once.
     ~Item() override;

     // Session handle.

@ -126,7 +126,7 @@ class GraphMgr {
     // has a root executor which may call into the runtime library.
     std::vector<ExecutionUnit> units;

-    // Used to deresgister a cost model when cost model is requried in graph
+    // Used to deregister a cost model when cost model is required in graph
     // manager.
     GraphMgr* graph_mgr;
   };

@ -157,7 +157,7 @@ class GraphMgr {
               CancellationManager* cancellation_manager,
               StatusCallback done);

-  // Don't attempt to process cost models unless explicitely requested for at
+  // Don't attempt to process cost models unless explicitly requested for at
   // least one of the items.
   bool skip_cost_models_ = true;

@ -25,7 +25,7 @@ limitations under the License.
 // A Master discovers remote devices on-demand and keeps track of
 // statistics of those remote devices.
 //
-// Each session analyses the graph, places nodes across available
+// Each session analyzes the graph, places nodes across available
 // devices, and ultimately drives the graph computation by initiating
 // RunGraph on the workers.

@ -89,7 +89,7 @@ class UntypedCall : public core::RefCounted {
   virtual void RequestReceived(Service* service, bool ok) = 0;

   // This method will be called either (i) when the server is notified
-  // that the request has been cancelled, or (ii) when the request completes
+  // that the request has been canceled, or (ii) when the request completes
   // normally. The implementation should distinguish these cases by querying
   // the `grpc::ServerContext` associated with the request.
   virtual void RequestCancelled(Service* service, bool ok) = 0;

@ -175,7 +175,7 @@ class Call : public UntypedCall<Service> {
   }

   // Registers `callback` as the function that should be called if and when this
-  // call is cancelled by the client.
+  // call is canceled by the client.
   void SetCancelCallback(std::function<void()> callback) {
     mutex_lock l(mu_);
     cancel_callback_ = std::move(callback);

@ -25,7 +25,7 @@ limitations under the License.
 // A GrpcMasterService discovers remote devices in the background and
 // keeps track of statistics of those remote devices.
 //
-// Each session analyses the graph, places nodes across available
+// Each session analyzes the graph, places nodes across available
 // devices, and ultimately drives the graph computation by initiating
 // RunGraph on workers.
 #include "tensorflow/core/distributed_runtime/rpc/grpc_master_service.h"

@ -517,7 +517,7 @@ TEST(GrpcSessionTest, Error) {
   //
   // Subgraph for "b" sleeps at the node "b_delay". When the sleep
   // finishes, the subgraph "b" will continue execution till it
-  // notices that it is cancelled. Meanwhile, subgraph's executor
+  // notices that it is canceled. Meanwhile, subgraph's executor
   // and its related state (registered ops) should still be alive.
   auto b = test::graph::Constant(&g, Tensor());
   b->set_assigned_device_name(dev_b);

@ -35,7 +35,7 @@ void WorkerCacheLogger::SetLogging(bool v) {
     ++want_logging_count_;
   } else {
     --want_logging_count_;
-    // If RPCs get cancelled, it may be possible for the count
+    // If RPCs get canceled, it may be possible for the count
     // to go negative. This should not be a fatal error, since
     // logging is non-critical.
     if (want_logging_count_ < 0) want_logging_count_ = 0;

@ -36,7 +36,7 @@ namespace tensorflow {
 // CancellationManager::get_cancellation_token.
 typedef int64 CancellationToken;

-// A callback that is invoked when a step is cancelled.
+// A callback that is invoked when a step is canceled.
 //
 // NOTE(mrry): See caveats about CancelCallback implementations in the
 // comment for CancellationManager::RegisterCallback.

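The classes involved are internal C++ API; purely as an illustration of the token-plus-callback pattern these comments describe, a toy Python analogue (not the TensorFlow implementation):

    import threading

    class ToyCancellationManager(object):
      def __init__(self):
        self._lock = threading.Lock()
        self._next_token = 0
        self._callbacks = {}
        self._cancelled = False

      def get_cancellation_token(self):
        with self._lock:
          self._next_token += 1
          return self._next_token - 1

      def register_callback(self, token, callback):
        with self._lock:
          if self._cancelled:
            return False  # cancellation already started; caller cleans up
          self._callbacks[token] = callback
          return True

      def start_cancel(self):
        with self._lock:
          self._cancelled = True
          callbacks = list(self._callbacks.values())
          self._callbacks.clear()
        for cb in callbacks:  # run outside the lock to avoid deadlock
          cb()
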
@ -163,7 +163,7 @@ REGISTER_OP("HasDefaultType")

 // This verifies that a function using an op before a type attr (with
 // a default) is added, still works. This is important for backwards
-// compatibilty.
+// compatibility.
 TEST(TFunc, MissingTypeAttr) {
   auto fdef = FDH::Create(
       // Name

@ -1021,7 +1021,7 @@ TEST(FunctionLibraryDefinitionTest, AddLibrary) {
   EXPECT_EQ(s.error_message(),
             "Gradient for function 'XTimesTwo' already exists.");

-  // No conflicing functions or gradients OK
+  // No conflicting functions or gradients OK
   proto.Clear();
   *proto.add_function() = test::function::XTimesFour();
   grad.set_function_name(test::function::XTimes16().signature().name());

@ -51,7 +51,7 @@ extern Status ConvertGraphDefToGraph(const GraphConstructorOptions& opts,
 // On error, returns non-OK and leaves *g unmodified.
 //
 // "shape_refiner" can be null. It should be non-null if the caller
-// intends to add additonal nodes to the graph after the import. This
+// intends to add additional nodes to the graph after the import. This
 // allows the caller to validate shapes of those nodes (since
 // ShapeRefiner::AddNode must be called in topological order).
 //

@ -40,7 +40,7 @@ class AnalyticalCostEstimator : public CostEstimator {
   explicit AnalyticalCostEstimator(Cluster* cluster, bool use_static_shapes);
   ~AnalyticalCostEstimator() override {}

-  // Initalizes the estimator for the specified grappler item.
+  // Initializes the estimator for the specified grappler item.
   // This implementation always returns OK.
   Status Initialize(const GrapplerItem& item) override;

@ -130,7 +130,7 @@ class CostEstimator {
  public:
   virtual ~CostEstimator() {}

-  // Initalizes the estimator for the specified grappler item.
+  // Initializes the estimator for the specified grappler item.
   // The estimator shouldn't be used if this function returns any status other
   // than OK.
   virtual Status Initialize(const GrapplerItem& item) = 0;

@ -50,7 +50,7 @@ class MeasuringCostEstimator : public CostEstimator {
                          int measurement_threads);
   ~MeasuringCostEstimator() override {}

-  // Initalizes the estimator for the specified grappler item.
+  // Initializes the estimator for the specified grappler item.
   // This implementation always returns OK.
   Status Initialize(const GrapplerItem& item) override;

@ -36,7 +36,7 @@ class OpLevelCostEstimator {

  protected:
   // Returns an estimate of device performance (in billions of operations
-  // executed per second) and memory bandwith (in GigaBytes/second) for the
+  // executed per second) and memory bandwidth (in GigaBytes/second) for the
   // specified device.
   virtual std::pair<double, double> GetDeviceInfo(
       const DeviceProperties& device) const;

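One simple way to turn the (giga-ops/sec, GB/sec) pair into a per-op time estimate is a roofline-style bound; this sketch is an assumption about how such a pair can be used, not the estimator's actual cost model:

    def estimate_op_seconds(op_count, bytes_moved, gigaops_per_sec, gb_per_sec):
      # An op is limited either by compute throughput or by memory bandwidth;
      # the slower of the two bounds the execution time.
      compute_seconds = op_count / (gigaops_per_sec * 1e9)
      memory_seconds = bytes_moved / (gb_per_sec * 1e9)
      return max(compute_seconds, memory_seconds)

    # 1e9 ops moving 4e8 bytes on a (1000 giga-ops/s, 100 GB/s) device:
    # 1 ms compute-bound vs 4 ms memory-bound, so the estimate is 4 ms.
    print(estimate_op_seconds(1e9, 4e8, 1000.0, 100.0))
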
@ -46,7 +46,7 @@ Status ModelPruner::Optimize(Cluster* cluster, const GrapplerItem& item,
     if (nodes_to_preserve.find(node.name()) != nodes_to_preserve.end()) {
       continue;
     }
-    // Don't remove nodes that are explicitely placed.
+    // Don't remove nodes that are explicitly placed.
     if (!node.device().empty()) {
       continue;
     }

@ -22,7 +22,7 @@ namespace tensorflow {
 namespace grappler {

 // Prune a model to make it more efficient:
-// * Remove unecessary operations.
+// * Remove unnecessary operations.
 // * Optimize gradient computations.
 class ModelPruner : public GraphOptimizer {
  public:

@ -34,7 +34,7 @@ class NodeMap {
   NodeDef* GetNode(const string& name);
   std::set<NodeDef*> GetOutputs(const string& node_name);
   // This method doesn't record the outputs of the added node; the outputs need
-  // to be explictly added by the AddOutput method.
+  // to be explicitly added by the AddOutput method.
   void AddNode(const string& name, NodeDef* node);
   void AddOutput(const string& node, const string& output);
   void UpdateOutput(const string& node, const string& old_output,

@ -50,7 +50,7 @@ template <typename From, typename To>
 struct scalar_cast_op<std::complex<From>, To> {
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE To
   operator()(const std::complex<From>& a) const {
-    // Replicate numpy behaviour of returning just the real part
+    // Replicate numpy behavior of returning just the real part
     return static_cast<To>(a.real());
   }
 };

@ -59,7 +59,7 @@ template <typename From, typename To>
 struct scalar_cast_op<From, std::complex<To>> {
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::complex<To> operator()(
       const From& a) const {
-    // Replicate numpy behaviour of setting the imaginary part to 0
+    // Replicate numpy behavior of setting the imaginary part to 0
     return std::complex<To>(static_cast<To>(a), To(0));
   }
 };

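The numpy behavior both specializations replicate is easy to check directly:

    import numpy as np

    # complex -> real keeps just the real part (numpy emits a ComplexWarning,
    # then discards the imaginary component).
    assert np.complex64(1.5 + 2.5j).astype(np.float32) == np.float32(1.5)

    # real -> complex sets the imaginary part to 0.
    assert np.float32(3.0).astype(np.complex64) == np.complex64(3.0 + 0.0j)
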
@ -713,7 +713,7 @@ class FusedResizeConv2DUsingGemmOp : public OpKernel {
       const int32 before =
           paddings_matrix(d, 0);  // Pad before existing elements.
       const int32 after =
-          paddings_matrix(d, 1);  // Pad after exisitng elements.
+          paddings_matrix(d, 1);  // Pad after existing elements.
       OP_REQUIRES(context, before >= 0 && after >= 0,
                   errors::InvalidArgument("paddings must be non-negative: ",
                                           before, " ", after));

@ -116,7 +116,7 @@ class CudaSolver {
   // Launches a memcpy of solver status data specified by dev_lapack_info from
   // device to the host, and asynchronously invokes the given callback when the
   // copy is complete. The first Status argument to the callback will be
-  // Status::OK if all lapack infos retrived are zero, otherwise an error status
+  // Status::OK if all lapack infos retrieved are zero, otherwise an error status
   // is given. The second argument contains a host-side copy of the entire set
   // of infos retrieved, and can be used for generating detailed error messages.
   Status CopyLapackInfoToHostAsync(

@ -26,7 +26,7 @@ limitations under the License.

 namespace tensorflow {

-// DeepConv2D is a Conv2D implementation specialzied for deep convolutions (i.e
+// DeepConv2D is a Conv2D implementation specialized for deep convolutions (i.e
 // large 'in_depth' and 'out_depth' product. See cost models below for details).
 //
 // DeepConv2D is implemented by computing the following equation:

@ -22,7 +22,7 @@ namespace tensorflow {

 class OpKernelContext;

-// DeepConv2D is a Conv2D implementation specialzied for deep (i.e. large
+// DeepConv2D is a Conv2D implementation specialized for deep (i.e. large
 // in_depth * out_depth product) convolutions (see deep_conv2d.cc for details).

 // DeepConv2DTransform is an interface for implementing transforms for

@ -44,7 +44,7 @@ class HingeLossUpdater : public DualLossUpdater {
                             const double current_dual, const double wx,
                             const double weighted_example_norm) const final {
     // Intuitively there are 3 cases:
-    // a. new optimal value of the dual variable falls withing the admissible
+    // a. new optimal value of the dual variable falls within the admissible
     // range [0, 1]. In this case we set new dual to this value.
     // b. new optimal value is < 0. Then, because of convexity, the optimal
     // valid value for new dual = 0

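The three cases amount to clamping the unconstrained optimum into [0, 1]; case c (value > 1, clamp to 1) is implied by symmetry with case b. A sketch:

    def project_dual(candidate):
      # a. candidate in [0, 1]: keep it.
      # b. candidate < 0: by convexity the best feasible value is 0.
      # c. candidate > 1: likewise clamp to 1.
      return max(0.0, min(1.0, candidate))

    assert project_dual(0.4) == 0.4
    assert project_dual(-0.2) == 0.0
    assert project_dual(1.7) == 1.0
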
@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/

-// This is a helper struct to package up the input and ouput
+// This is a helper struct to package up the input and output
 // parameters of an image resizer (the height, widths, etc.). To
 // reduce code duplication and ensure consistency across the different
 // resizers, it performs the input validation.

@ -238,7 +238,7 @@ private:
   {
     IncompleteTuple empty(dtypes_.size());

-    // Initialise empty tuple with given dta
+    // Initialize empty tuple with given data
     for(std::size_t i = 0; i < findices.dimension(0); ++i)
     {
       std::size_t index = findices(i);

@ -64,7 +64,7 @@ bool IsSupportedAndEnabled();
 // sum((a_data[i, l] + offset_a) * (b_data[l, j] + offset_b)) : l in [0, k)
 //
 // If transpose_a is false the lhs operand has row major layout, otherwise
-// column major. Similarily transpose_b describes the layout of the rhs operand.
+// column major. Similarly transpose_b describes the layout of the rhs operand.
 // lda, ldb, and ldc are the strides of the lhs operand, rhs operand and the
 // result arrays.
 void QuantizedGemm(OpKernelContext* context, bool transpose_a, bool transpose_b,

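A numpy reference for the accumulation formula in the comment, with the stride arguments (lda, ldb, ldc) omitted; the function name is illustrative:

    import numpy as np

    def quantized_gemm_reference(a_data, b_data, offset_a, offset_b,
                                 transpose_a=False, transpose_b=False):
      # sum((a_data[i, l] + offset_a) * (b_data[l, j] + offset_b)) over l.
      a = a_data.T if transpose_a else a_data
      b = b_data.T if transpose_b else b_data
      return (a.astype(np.int32) + offset_a) @ (b.astype(np.int32) + offset_b)

    a = np.array([[1, 2], [3, 4]], dtype=np.uint8)
    b = np.array([[5, 6], [7, 8]], dtype=np.uint8)
    print(quantized_gemm_reference(a, b, offset_a=-128, offset_b=-128))
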
@ -295,7 +295,7 @@ class MklConv2DCustomBackpropInputOp : public OpKernel {
       dnnDelete_F32(mkl_convert_filter);
     } else {
       // If we do not need any layout conversion for filter, then
-      // we direclty assign input filter to resources[].
+      // we directly assign input filter to resources[].
       conv_res[dnnResourceFilter] =
           static_cast<void*>(const_cast<T*>(filter.flat<T>().data()));
     }

@ -306,7 +306,7 @@ class PaddedBatchDatasetOp : public OpKernel {
       const TensorShape& element_shape =
           batch_elements[i][component_index].shape();
       // TODO(mrry): Perform this check in the shape function if
-      // enough static information is avaiable to do so.
+      // enough static information is available to do so.
       if (element_shape.dims() != padded_shape.dims()) {
         return errors::InvalidArgument(
             "All elements in a batch must have the same rank as the "

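What the rank check guards against: every element must have the same rank as the padded shape before it can be padded out. A numpy sketch of padding a batch of variable-length rank-1 elements (values illustrative):

    import numpy as np

    def padded_batch(elements, padded_len, pad_value=0):
      # All elements are rank 1, matching the rank-1 padded shape.
      batch = np.full((len(elements), padded_len), pad_value, dtype=np.int64)
      for i, el in enumerate(elements):
        batch[i, :len(el)] = el
      return batch

    print(padded_batch([[1, 2], [3], [4, 5, 6]], padded_len=3))
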
@ -80,7 +80,7 @@ float QuantizedToFloat(T input, float range_min, float range_max) {
       static_cast<int64>(Eigen::NumTraits<T>::lowest());
   const double offset_input = static_cast<double>(input) - lowest_quantized;
   // For compatibility with DEQUANTIZE_WITH_EIGEN, we should convert
-  // range_scale to a float, otherwise range_min_rounded might be slighly
+  // range_scale to a float, otherwise range_min_rounded might be slightly
   // different.
   const double range_min_rounded =
       round(range_min / static_cast<float>(range_scale)) *

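Assembling the variables visible in this hunk into a complete dequantization sketch; the final combining line is an assumption based on those variables, so treat it as approximate:

    import numpy as np

    def quantized_to_float(x, range_min, range_max, qtype=np.uint8):
      info = np.iinfo(qtype)
      lowest, highest = float(info.min), float(info.max)
      range_scale = (range_max - range_min) / (highest - lowest)
      offset_input = float(x) - lowest
      # Round range_min to a multiple of range_scale, going through float32
      # for the DEQUANTIZE_WITH_EIGEN compatibility the comment mentions.
      range_min_rounded = round(range_min / np.float32(range_scale)) * range_scale
      return range_min_rounded + offset_input * range_scale

    # uint8 value 128 in the range [0.0, 1.0] maps to 128/255, about 0.502.
    print(quantized_to_float(128, 0.0, 1.0))
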
@ -35,7 +35,7 @@ class SmoothHingeLossUpdater : public DualLossUpdater {
                             const double current_dual, const double wx,
                             const double weighted_example_norm) const final {
     // Intuitively there are 3 cases:
-    // a. new optimal value of the dual variable falls withing the admissible
+    // a. new optimal value of the dual variable falls within the admissible
     // range [0, 1]. In this case we set new dual to this value.
     // b. new optimal value is < 0. Then, because of convexity, the optimal
     // valid value for new dual = 0

@ -24,7 +24,7 @@ limitations under the License.
 namespace tensorflow {
 namespace functor {

-// TOOD(zongheng): this should be a general functor that powers SparseAdd and
+// TODO(zongheng): this should be a general functor that powers SparseAdd and
 // ScatterNd ops. It should be moved to its own header file, once the other ops
 // are implemented.
 template <typename Device, typename T, typename Index, int NDIMS,

@ -541,7 +541,7 @@ class optional : private internal_optional::optional_data<T>,
   //   opt.emplace(arg1,arg2,arg3);  (Constructs Foo(arg1,arg2,arg3))
   //
   // If the optional is non-empty, and the `args` refer to subobjects of the
-  // current object, then behaviour is undefined. This is because the current
+  // current object, then behavior is undefined. This is because the current
   // object will be destructed before the new object is constructed with `args`.
   //
   template <typename... Args,

@ -586,7 +586,7 @@ class optional : private internal_optional::optional_data<T>,

   // [optional.observe], observers
   // You may use `*opt`, and `opt->m`, to access the underlying T value and T's
-  // member `m`, respectively. If the optional is empty, behaviour is
+  // member `m`, respectively. If the optional is empty, behavior is
   // undefined.
   constexpr const T* operator->() const { return this->pointer(); }
   T* operator->() {

Some files were not shown because too many files have changed in this diff.