diff --git a/tensorflow/compiler/xla/tests/test_utils_test.cc b/tensorflow/compiler/xla/tests/test_utils_test.cc
index 4337aa4bf9a..1fa43c65445 100644
--- a/tensorflow/compiler/xla/tests/test_utils_test.cc
+++ b/tensorflow/compiler/xla/tests/test_utils_test.cc
@@ -258,7 +258,7 @@ XLA_TEST_F(TestUtilsTest, MakeFakeArgumentsForGather) {
   auto module = ParseHloString(R"(
 HloModule Test
 
-ENTRY %module(paramater.0: f32[200,100,300], parameter.1: s32[10,2]) ->
+ENTRY %module(parameter.0: f32[200,100,300], parameter.1: s32[10,2]) ->
 f32[10,300] {
   %parameter.0 = f32[200,100,300] parameter(0)
   %parameter.1 = s32[10,2] parameter(1)
diff --git a/tensorflow/contrib/distribute/python/parameter_server_strategy.py b/tensorflow/contrib/distribute/python/parameter_server_strategy.py
index be863322256..fe8ec10b11d 100644
--- a/tensorflow/contrib/distribute/python/parameter_server_strategy.py
+++ b/tensorflow/contrib/distribute/python/parameter_server_strategy.py
@@ -46,7 +46,7 @@ class ParameterServerStrategy(distribute_lib.DistributionStrategy):
   becomes local training where variables are assigned to local CPU or the only
   GPU. When each worker has more than one GPU, operations will be replicated on
   these GPUs. In both cases, operations are replicated but variables are not and
-  these workers share a common view for which paramater server a variable is
+  these workers share a common view for which parameter server a variable is
   assigned to.
 
   This class assumes between-graph replication will be used and works on a graph
diff --git a/tensorflow/lite/toco/dump_graphviz.cc b/tensorflow/lite/toco/dump_graphviz.cc
index ad69e4f7b7a..95a34a7e4fb 100644
--- a/tensorflow/lite/toco/dump_graphviz.cc
+++ b/tensorflow/lite/toco/dump_graphviz.cc
@@ -37,7 +37,7 @@ using toco::port::StringF;
 namespace toco {
 namespace {
 
-// 'nslimit' is a graphviz (dot) paramater that limits the iterations during
+// 'nslimit' is a graphviz (dot) parameter that limits the iterations during
 // the layout phase. Omitting it allows infinite iterations, causing some
 // complex graphs to never finish. A value of 125 produces good graphs
 // while allowing complex graphs to finish.
diff --git a/tensorflow/python/keras/layers/kernelized.py b/tensorflow/python/keras/layers/kernelized.py
index 799d09c395f..6cb446dd71c 100644
--- a/tensorflow/python/keras/layers/kernelized.py
+++ b/tensorflow/python/keras/layers/kernelized.py
@@ -112,7 +112,7 @@ class RandomFourierFeatures(base_layer.Layer):
      definitions above). When provided, it should be a positive float. If None,
      the implementation chooses a default value (1.0 typically). Both the
      approximation error of the kernel and the classification quality are
-      sensitive to this parameter. If trainable is set to True, this paramater
+      sensitive to this parameter. If trainable is set to True, this parameter
      is learned end-to-end during training and the provided value serves as an
      initialization value. NOTE: When this layer is used to map the initial
      features and then the
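
For context on the `scale` argument documented in the kernelized.py hunk above, here is a minimal, hypothetical usage sketch. It assumes the layer is exposed as `tf.keras.layers.experimental.RandomFourierFeatures` (the public export path is not part of this diff) and only illustrates how `scale` and `trainable` interact as the docstring describes.

```python
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.Input(shape=(64,)),
    # scale is the kernel bandwidth; with trainable=True the value given here
    # is only an initialization and is refined end-to-end during training.
    tf.keras.layers.experimental.RandomFourierFeatures(
        output_dim=256,
        kernel_initializer='gaussian',
        scale=10.0,
        trainable=True),
    tf.keras.layers.Dense(units=1),
])
model.compile(optimizer='adam', loss='hinge')
```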