Fix typos (#12195)
commit 08bbfa187f
parent ab96f41fb4
Changed paths: tensorflow/contrib, tensorflow/core, tensorflow/python/framework, tensorflow/tools/docker
@@ -286,14 +286,14 @@ class CudnnRNNTestSaveRestore(TensorFlowTestCase):
     save_path = os.path.join(self.get_temp_dir(),
                              "save-restore-variable-test")
     saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
-    # Passing graph explictly, otherwise an old sess would be reused.
+    # Passing graph explicitly, otherwise an old sess would be reused.
     with self.test_session(
         use_gpu=True, graph=ops.get_default_graph()) as sess:
       sess.run(variables.global_variables_initializer())
       params_v = sess.run(params)
       val = saver.save(sess, save_path)
       self.assertEqual(save_path, val)
-    # Passing graph explictly, otherwise an old sess would be reused.
+    # Passing graph explicitly, otherwise an old sess would be reused.
     with self.test_session(
         use_gpu=True, graph=ops.get_default_graph()) as sess:
       reset_params = state_ops.assign(
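Aside: a minimal sketch of the save-then-restore pattern these tests exercise, using public TF 1.x APIs (the variable name `params` and the /tmp path are illustrative, not taken from the test):

import os
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
  # Build the variables and the V2 saver against a fresh graph.
  params = tf.Variable(tf.random_normal([4]), name="params")
  saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)

save_path = os.path.join("/tmp", "save-restore-variable-test")

# The graph is passed explicitly so the session is built against it
# rather than an older cached session's graph.
with tf.Session(graph=graph) as sess:
  sess.run(tf.global_variables_initializer())
  params_v = sess.run(params)
  assert saver.save(sess, save_path) == save_path

# A second session over the same graph restores the checkpoint.
with tf.Session(graph=graph) as sess:
  saver.restore(sess, save_path)
  restored_v = sess.run(params)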
@@ -328,14 +328,14 @@ class CudnnRNNTestSaveRestore(TensorFlowTestCase):
     save_path = os.path.join(self.get_temp_dir(),
                              "save-restore-variable-test")
     saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
-    # Passing graph explictly, otherwise an old sess would be reused.
+    # Passing graph explicitly, otherwise an old sess would be reused.
     with self.test_session(
         use_gpu=True, graph=ops.get_default_graph()) as sess:
       sess.run(variables.global_variables_initializer())
       params_v = sess.run(param_vars)
       val = saver.save(sess, save_path)
       self.assertEqual(save_path, val)
-    # Passing graph explictly, otherwise an old sess would be reused.
+    # Passing graph explicitly, otherwise an old sess would be reused.
     with self.test_session(
         use_gpu=True, graph=ops.get_default_graph()) as sess:
       reset_params = [
@@ -398,14 +398,14 @@ class CudnnRNNTestSaveRestore(TensorFlowTestCase):
         params=params,
         is_training=False)
     total_sum = sum(map(math_ops.reduce_sum, outputs))
-    # Passing graph explictly, otherwise an old sess would be reused.
+    # Passing graph explicitly, otherwise an old sess would be reused.
     with self.test_session(
         use_gpu=True, graph=ops.get_default_graph()) as sess:
       sess.run(variables.global_variables_initializer())
       total_sum_v = sess.run(total_sum)
       val = saver.save(sess, save_path)
       self.assertEqual(save_path, val)
-    # Passing graph explictly, otherwise an old sess would be reused.
+    # Passing graph explicitly, otherwise an old sess would be reused.
     with self.test_session(
         use_gpu=True, graph=ops.get_default_graph()) as sess:
       reset_params = state_ops.assign(
@@ -61,7 +61,7 @@ void MPIUtils::InitMPI() {
   MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &number_of_procs));
   MPI_CHECK(MPI_Get_processor_name(my_host_name, &len));
   fprintf(stderr,
-          "MPI Environment initialised. Process id: %d Total processes: %d "
+          "MPI Environment initialized. Process id: %d Total processes: %d "
          "|| Hostname: %s \n",
           proc_id, number_of_procs, my_host_name);
 }
@@ -682,7 +682,7 @@ Status SimplePlacer::Run() {
       int dst_root_id = colocation_graph.FindRoot(dst->id());
       auto& src_root = colocation_graph.members_[src_root_id];
       auto& dst_root = colocation_graph.members_[dst_root_id];
-      // If both the source node and this node have paritally
+      // If both the source node and this node have partially
       // specified a device, then 'node's device should be
       // cleared: the reference edge forces 'node' to be on the
       // same device as the source node.
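The comment fixed above concerns reference edges in the placer; a user-level illustration (TF 1.x, hypothetical names) of the rule it describes:

import tensorflow as tf

with tf.Graph().as_default():
  with tf.device("/cpu:0"):
    v = tf.Variable(tf.zeros([2]), name="v")
  # tf.assign consumes `v` through a reference edge, so the placer
  # colocates it with `v` on /cpu:0, clearing any partially specified
  # device the assign op itself might have requested.
  update = tf.assign(v, tf.ones([2]))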
@@ -30,7 +30,7 @@ message RewriterConfig {
   // Fold constants (default is OFF)
   Toggle constant_folding = 3;
 
-  // If true, don't remove unecessary ops from the graph
+  // If true, don't remove unnecessary ops from the graph
   bool disable_model_pruning = 2;
 
   enum MemOptType {
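For context, a hedged sketch of how a RewriterConfig carrying this field is typically attached to a session (TF 1.x; the field names come from the proto above, everything else is illustrative):

import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2

# Keep "unnecessary" ops: disable Grappler's model pruning pass.
rewrite_options = rewriter_config_pb2.RewriterConfig(
    disable_model_pruning=True)
config = tf.ConfigProto(
    graph_options=tf.GraphOptions(rewrite_options=rewrite_options))

with tf.Session(config=config) as sess:
  pass  # ops are no longer pruned from the graph for this session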
@@ -584,7 +584,7 @@ class _FuncGraph(ops.Graph):
   _FuncGraph overrides ops.Graph's create_op() so that we can keep
   track of all inputs into every op created inside the function. If
   any input is from other graphs, we keep track of it in self.capture
-  and substitue the input with a place holder.
+  and substitute the input with a place holder.
 
   Each captured input's corresponding place holder is converted into a
   function argument and the caller passes in the captured tensor.
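An illustrative sketch (TF 1.x; names are hypothetical) of the capturing behavior this docstring describes:

import tensorflow as tf
from tensorflow.python.framework import function

x = tf.constant(2.0)  # defined in the outer graph

@function.Defun(tf.float32)
def scale(y):
  # `x` comes from outside the function body, so _FuncGraph records it
  # as a captured input and rewires this use to an internal placeholder;
  # the captured tensor is passed in as an extra argument at call time.
  return x * y

out = scale(tf.constant(3.0))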
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-# Paramterized build and test for TensorFlow Docker images.
+# Parameterized build and test for TensorFlow Docker images.
 #
 # Usage:
 #   parameterized_docker_build.sh