diff --git a/tensorflow/contrib/cmake/README.md b/tensorflow/contrib/cmake/README.md index a2fbc11383a..6d4c0eb79d8 100644 --- a/tensorflow/contrib/cmake/README.md +++ b/tensorflow/contrib/cmake/README.md @@ -49,7 +49,7 @@ Change to your working directory: Where *C:\Path\to* is the path to your real working directory. -Create a folder where Tensorflow headers/libraries/binaries will be installed +Create a folder where TensorFlow headers/libraries/binaries will be installed after they are built: C:\Path\to>mkdir install @@ -83,7 +83,7 @@ Go to the project folder: C:\Path\to>cd tensorflow C:\Path\to\tensorflow> -Now go to *tensorflow\contrib\cmake* folder in Tensorflow's contrib sources: +Now go to *tensorflow\contrib\cmake* folder in TensorFlow's contrib sources: C:\Path\to\tensorflow>cd tensorflow\contrib\cmake C:\Path\to\tensorflow\tensorflow\contrib\cmake> @@ -101,7 +101,7 @@ and [Visual Studio](http://www.cmake.org/cmake/help/latest/manual/cmake-generators.7.html#visual-studio-generators) generators. -We will use shadow building to separate the temporary files from the Tensorflow +We will use shadow building to separate the temporary files from the TensorFlow source code. Create a temporary *build* folder and change your working directory to it: @@ -143,7 +143,7 @@ It will generate *Visual Studio* solution file *tensorflow.sln* in current directory. If the *gmock* directory does not exist, and/or you do not want to build -Tensorflow unit tests, you need to add *cmake* command argument +TensorFlow unit tests, you need to add *cmake* command argument `-Dtensorflow_BUILD_TESTS=OFF` to disable testing. Compiling @@ -219,7 +219,7 @@ If all tests are passed, safely continue. Installing ========== -To install Tensorflow to the specified *install* folder: +To install TensorFlow to the specified *install* folder: [...]\contrib\cmake\build\release>nmake install @@ -232,7 +232,7 @@ It sounds not so strange and it works. This will create the following folders under the *install* location: * bin - that contains tensorflow binaries; - * include - that contains C++ headers and Tensorflow *.proto files; + * include - that contains C++ headers and TensorFlow *.proto files; * lib - that contains linking libraries and *CMake* configuration files for *tensorflow* package. @@ -251,7 +251,7 @@ should link against release libtensorflow.lib library. DLLs vs. static linking ======================= -Static linking is now the default for the Tensorflow Buffer libraries. Due to +Static linking is now the default for the TensorFlow Buffer libraries. Due to issues with Win32's use of a separate heap for each DLL, as well as binary compatibility issues between different versions of MSVC's STL library, it is recommended that you use static linkage only. However, it is possible to @@ -270,7 +270,7 @@ compatibility between releases, so it is likely that future versions of these libraries will *not* be usable as drop-in replacements. If your project is itself a DLL intended for use by third-party software, we -recommend that you do NOT expose Tensorflow objects in your library's +recommend that you do NOT expose TensorFlow objects in your library's public interface, and that you statically link them into your library. 
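For a project that consumes the *install* tree described above (headers under *include*, *libtensorflow.lib* under *lib*), a minimal C++ sketch of the consuming side might look like the following; the `NewSession`/`Status` pattern mirrors code touched elsewhere in this change, while the program itself is illustrative only:

```cpp
// Minimal consumer sketch (illustrative): build against the installed
// TensorFlow headers/library and create a session, checking the returned
// Status the same way the examples in this change do.
#include <iostream>

#include "tensorflow/core/public/session.h"

int main() {
  tensorflow::SessionOptions options;
  tensorflow::Session* session = nullptr;
  tensorflow::Status status = tensorflow::NewSession(options, &session);
  if (!status.ok()) {
    std::cerr << "Could not create TensorFlow Session: " << status.ToString()
              << std::endl;
    return 1;
  }
  std::cout << "TensorFlow Session created." << std::endl;
  session->Close();
  delete session;
  return 0;
}
```

Per the linking notes above, a release build of such a program should link against the release *libtensorflow.lib*, and preferably statically, given the DLL caveats spelled out in the README.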
Notes on Compiler Warnings diff --git a/tensorflow/contrib/ios_examples/camera/tensorflow_utils.mm b/tensorflow/contrib/ios_examples/camera/tensorflow_utils.mm index 1df912f6e21..7a5dc31a222 100644 --- a/tensorflow/contrib/ios_examples/camera/tensorflow_utils.mm +++ b/tensorflow/contrib/ios_examples/camera/tensorflow_utils.mm @@ -123,7 +123,7 @@ tensorflow::Status LoadModel(NSString* file_name, NSString* file_type, tensorflow::Session* session_pointer = nullptr; tensorflow::Status session_status = tensorflow::NewSession(options, &session_pointer); if (!session_status.ok()) { - LOG(ERROR) << "Could not create Tensorflow Session: " << session_status; + LOG(ERROR) << "Could not create TensorFlow Session: " << session_status; return session_status; } session->reset(session_pointer); @@ -149,7 +149,7 @@ tensorflow::Status LoadModel(NSString* file_name, NSString* file_type, LOG(INFO) << "Creating session."; tensorflow::Status create_status = (*session)->Create(tensorflow_graph); if (!create_status.ok()) { - LOG(ERROR) << "Could not create Tensorflow Graph: " << create_status; + LOG(ERROR) << "Could not create TensorFlow Graph: " << create_status; return create_status; } diff --git a/tensorflow/contrib/ios_examples/simple/RunModelViewController.mm b/tensorflow/contrib/ios_examples/simple/RunModelViewController.mm index 2ebb197399f..68e6bf8668b 100644 --- a/tensorflow/contrib/ios_examples/simple/RunModelViewController.mm +++ b/tensorflow/contrib/ios_examples/simple/RunModelViewController.mm @@ -157,7 +157,7 @@ NSString* RunInferenceOnImage() { LOG(INFO) << "Creating session."; tensorflow::Status s = session->Create(tensorflow_graph); if (!s.ok()) { - LOG(ERROR) << "Could not create Tensorflow Graph: " << s; + LOG(ERROR) << "Could not create TensorFlow Graph: " << s; return @""; } diff --git a/tensorflow/contrib/learn/python/learn/models.py b/tensorflow/contrib/learn/python/learn/models.py index 4bbb6e023df..d48fa20fb4a 100644 --- a/tensorflow/contrib/learn/python/learn/models.py +++ b/tensorflow/contrib/learn/python/learn/models.py @@ -224,7 +224,7 @@ def get_autoencoder_model(hidden_units, target_predictor_fn, return dnn_autoencoder_estimator -## This will be in Tensorflow 0.7. +## This will be in TensorFlow 0.7. ## TODO(ilblackdragon): Clean this up when it's released @@ -328,7 +328,7 @@ def bidirectional_rnn(cell_fw, return outputs, array_ops_.concat(1, [state_fw, state_bw]) -# End of Tensorflow 0.7 +# End of TensorFlow 0.7 def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional, diff --git a/tensorflow/contrib/session_bundle/BUILD b/tensorflow/contrib/session_bundle/BUILD index 8f63ef5d730..b2557e4b510 100644 --- a/tensorflow/contrib/session_bundle/BUILD +++ b/tensorflow/contrib/session_bundle/BUILD @@ -1,4 +1,4 @@ -# Description: Tensorflow Serving session bundle. +# Description: TensorFlow Serving session bundle. 
package( default_visibility = ["//visibility:public"], diff --git a/tensorflow/contrib/session_bundle/README.md b/tensorflow/contrib/session_bundle/README.md index d9b6c0f9a28..defa7767c5b 100644 --- a/tensorflow/contrib/session_bundle/README.md +++ b/tensorflow/contrib/session_bundle/README.md @@ -40,7 +40,7 @@ These exports have the following properties, ## Python exporting code The [`Exporter`](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/session_bundle/exporter.py) -class can be used to export a model in the above format from a Tensorflow python +class can be used to export a model in the above format from a TensorFlow python binary. ## C++ initialization code diff --git a/tensorflow/contrib/session_bundle/example/BUILD b/tensorflow/contrib/session_bundle/example/BUILD index 80e22e5d44b..c2844ae321e 100644 --- a/tensorflow/contrib/session_bundle/example/BUILD +++ b/tensorflow/contrib/session_bundle/example/BUILD @@ -1,4 +1,4 @@ -# Description: Tensorflow Serving session_bundle example. +# Description: TensorFlow Serving session_bundle example. package( default_visibility = ["//tensorflow/contrib/session_bundle:__subpackages__"], @@ -13,12 +13,12 @@ exports_files(["LICENSE"]) filegroup( name = "all_files", srcs = glob( - ["**/*"], - exclude = [ - "**/METADATA", - "**/OWNERS", - "g3doc/sitemap.md", - ], + ["**/*"], + exclude = [ + "**/METADATA", + "**/OWNERS", + "g3doc/sitemap.md", + ], ), visibility = ["//visibility:public"], ) @@ -26,27 +26,27 @@ filegroup( py_binary( name = "export_half_plus_two", srcs = [ - "export_half_plus_two.py", + "export_half_plus_two.py", ], srcs_version = "PY2AND3", deps = [ - "//tensorflow:tensorflow_py", - "//tensorflow/contrib/session_bundle:exporter", + "//tensorflow:tensorflow_py", + "//tensorflow/contrib/session_bundle:exporter", ], ) genrule( name = "half_plus_two", outs = [ - "half_plus_two/00000123/export.meta", - "half_plus_two/00000123/export-00000-of-00001", + "half_plus_two/00000123/export.meta", + "half_plus_two/00000123/export-00000-of-00001", ], cmd = - "rm -rf /tmp/half_plus_two; " + - "$(PYTHON_BIN_PATH) $(locations :export_half_plus_two); " + - "cp -r /tmp/half_plus_two/* $(@D)/half_plus_two", + "rm -rf /tmp/half_plus_two; " + + "$(PYTHON_BIN_PATH) $(locations :export_half_plus_two); " + + "cp -r /tmp/half_plus_two/* $(@D)/half_plus_two", tools = [ - ":export_half_plus_two", + ":export_half_plus_two", ], visibility = ["//visibility:public"], ) diff --git a/tensorflow/contrib/session_bundle/session_bundle_test.cc b/tensorflow/contrib/session_bundle/session_bundle_test.cc index 5ecbb32f883..f215df4493e 100644 --- a/tensorflow/contrib/session_bundle/session_bundle_test.cc +++ b/tensorflow/contrib/session_bundle/session_bundle_test.cc @@ -124,7 +124,7 @@ void BasicTest(const string& export_path) { outputs[0], test::AsTensor<float>({2, 2.5, 3, 3.5}, TensorShape({4, 1}))); } -TEST(LoadSessionBundleFromPath, BasicTensorflowContrib) { +TEST(LoadSessionBundleFromPath, BasicTensorFlowContrib) { const string export_path = test_util::TestSrcDirPath( "session_bundle/example/half_plus_two/00000123"); BasicTest(export_path); diff --git a/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py b/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py index 06dd697b3e0..cd052576044 100644 --- a/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py +++ b/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py @@ -271,7 +271,7 @@ class Image(ItemHandler): class TFExampleDecoder(data_decoder.DataDecoder): - 
"""A decoder for Tensorflow Examples. + """A decoder for TensorFlow Examples. Decoding Example proto buffers is comprised of two stages: (1) Example parsing and (2) tensor manipulation. diff --git a/tensorflow/contrib/slim/python/slim/evaluation.py b/tensorflow/contrib/slim/python/slim/evaluation.py index dde9e690ddb..4668b030a7e 100644 --- a/tensorflow/contrib/slim/python/slim/evaluation.py +++ b/tensorflow/contrib/slim/python/slim/evaluation.py @@ -181,7 +181,7 @@ def evaluation(sess, written out using a summary writer. Args: - sess: The current Tensorflow `Session`. + sess: The current TensorFlow `Session`. num_evals: The number of times to execute `eval_op`. init_op: An operation run at the beginning of evaluation. init_op_feed_dict: A feed dictionary to use when executing `init_op`. diff --git a/tensorflow/contrib/tensor_forest/BUILD b/tensorflow/contrib/tensor_forest/BUILD index 054c97d4b1a..94785c9d3b7 100644 --- a/tensorflow/contrib/tensor_forest/BUILD +++ b/tensorflow/contrib/tensor_forest/BUILD @@ -1,4 +1,4 @@ -# Tensorflow code for training random forests. +# TensorFlow code for training random forests. licenses(["notice"]) # Apache 2.0 exports_files(["LICENSE"]) diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD index 2754525c54b..82bc4fafe3b 100644 --- a/tensorflow/core/BUILD +++ b/tensorflow/core/BUILD @@ -627,7 +627,7 @@ cc_library( alwayslink = 1, ) -# Full Tensorflow library with operator support. Use this unless reducing +# Full TensorFlow library with operator support. Use this unless reducing # binary size (by packaging a reduced operator set) is a concern. cc_library( name = "android_tensorflow_lib", @@ -706,7 +706,7 @@ filegroup( visibility = ["//visibility:public"], ) -# Portable library providing testing functionality for Tensorflow. +# Portable library providing testing functionality for TensorFlow. cc_library( name = "android_tensorflow_test_lib", testonly = 1, diff --git a/tensorflow/core/example/feature_util.h b/tensorflow/core/example/feature_util.h index ad122f530d7..048930d2356 100644 --- a/tensorflow/core/example/feature_util.h +++ b/tensorflow/core/example/feature_util.h @@ -15,7 +15,7 @@ limitations under the License. // A set of lightweight wrappers which simplify access to Example features. // -// Tensorflow Example proto uses associative maps on top of oneof fields. +// TensorFlow Example proto uses associative maps on top of oneof fields. // So accessing feature values is not very convenient. // // For example, to read a first value of integer feature "tag": diff --git a/tensorflow/examples/android/BUILD b/tensorflow/examples/android/BUILD index 1636bea7c47..1fc587100d4 100644 --- a/tensorflow/examples/android/BUILD +++ b/tensorflow/examples/android/BUILD @@ -1,5 +1,5 @@ # Description: -# Tensorflow camera demo app for Android. +# TensorFlow camera demo app for Android. package(default_visibility = ["//visibility:public"]) diff --git a/tensorflow/examples/android/README.md b/tensorflow/examples/android/README.md index fb737f7004e..a9985a797a2 100644 --- a/tensorflow/examples/android/README.md +++ b/tensorflow/examples/android/README.md @@ -1,6 +1,6 @@ -# Tensorflow Android Camera Demo +# TensorFlow Android Camera Demo -This folder contains a simple camera-based demo application utilizing Tensorflow. +This folder contains a simple camera-based demo application utilizing TensorFlow. 
## Description @@ -76,5 +76,5 @@ errors may not be obvious if the app halts immediately, so if you installed with bazel and the app doesn't come up, then the easiest thing to do is try installing with adb. -Once the app is installed it will be named "Tensorflow Demo" and have the orange -Tensorflow logo as its icon. +Once the app is installed it will be named "TensorFlow Demo" and have the orange +TensorFlow logo as its icon. diff --git a/tensorflow/examples/android/jni/tensorflow_jni.cc b/tensorflow/examples/android/jni/tensorflow_jni.cc index f00355e8876..a651dfb295e 100644 --- a/tensorflow/examples/android/jni/tensorflow_jni.cc +++ b/tensorflow/examples/android/jni/tensorflow_jni.cc @@ -41,7 +41,7 @@ limitations under the License. using namespace tensorflow; -// Global variables that holds the Tensorflow classifier. +// Global variables that holds the TensorFlow classifier. static std::unique_ptr<tensorflow::Session> session; static std::vector<std::string> g_label_strings; @@ -85,7 +85,7 @@ inline static int64 CurrentThreadTimeUs() { return tv.tv_sec * 1000000 + tv.tv_usec; } -JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)( +JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorFlow)( JNIEnv* env, jobject thiz, jobject java_asset_manager, jstring model, jstring labels, jint num_classes, jint model_input_size, jint image_mean, jfloat image_std, jstring input_name, jstring output_name) { @@ -112,7 +112,7 @@ JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)( g_output_name.reset( new std::string(env->GetStringUTFChars(output_name, NULL))); - LOG(INFO) << "Loading Tensorflow."; + LOG(INFO) << "Loading TensorFlow."; LOG(INFO) << "Making new SessionOptions."; tensorflow::SessionOptions options; @@ -137,12 +137,12 @@ JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)( LOG(INFO) << "Creating session."; tensorflow::Status s = session->Create(tensorflow_graph); if (!s.ok()) { - LOG(FATAL) << "Could not create Tensorflow Graph: " << s; + LOG(FATAL) << "Could not create TensorFlow Graph: " << s; } // Clear the proto to save memory space. tensorflow_graph.Clear(); - LOG(INFO) << "Tensorflow graph loaded from: " << model_cstr; + LOG(INFO) << "TensorFlow graph loaded from: " << model_cstr; // Read the label list ReadFileToVector(asset_manager, labels_cstr, &g_label_strings); @@ -237,7 +237,7 @@ static std::string ClassifyImage(const RGBA* const bitmap_src) { auto input_tensor_mapped = input_tensor.tensor<float, 4>(); - LOG(INFO) << "Tensorflow: Copying Data."; + LOG(INFO) << "TensorFlow: Copying Data."; for (int i = 0; i < g_tensorflow_input_size; ++i) { const RGBA* src = bitmap_src + i * g_tensorflow_input_size; for (int j = 0; j < g_tensorflow_input_size; ++j) { diff --git a/tensorflow/examples/android/jni/tensorflow_jni.h b/tensorflow/examples/android/jni/tensorflow_jni.h index 0c63f4504d0..b83698b3b22 100644 --- a/tensorflow/examples/android/jni/tensorflow_jni.h +++ b/tensorflow/examples/android/jni/tensorflow_jni.h @@ -14,8 +14,8 @@ limitations under the License. ==============================================================================*/ // The methods are exposed to Java to allow for interaction with the native -// Tensorflow code. See -// tensorflow/examples/android/src/org/tensorflow/TensorflowClassifier.java +// TensorFlow code. See +// tensorflow/examples/android/src/org/tensorflow/TensorFlowClassifier.java // for the Java counterparts. 
#ifndef ORG_TENSORFLOW_JNI_TENSORFLOW_JNI_H_ // NOLINT @@ -28,9 +28,9 @@ extern "C" { #endif // __cplusplus #define TENSORFLOW_METHOD(METHOD_NAME) \ - Java_org_tensorflow_demo_TensorflowClassifier_##METHOD_NAME // NOLINT + Java_org_tensorflow_demo_TensorFlowClassifier_##METHOD_NAME // NOLINT -JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)( +JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorFlow)( JNIEnv* env, jobject thiz, jobject java_asset_manager, jstring model, jstring labels, jint num_classes, jint model_input_size, jint image_mean, jfloat image_std, jstring input_name, jstring output_name); diff --git a/tensorflow/examples/android/res/values/base-strings.xml b/tensorflow/examples/android/res/values/base-strings.xml index e6c3bc7fa0a..e6bcc6292b3 100644 --- a/tensorflow/examples/android/res/values/base-strings.xml +++ b/tensorflow/examples/android/res/values/base-strings.xml @@ -16,5 +16,5 @@ --> <resources> - <string name="app_name">Tensorflow Demo</string> + <string name="app_name">TensorFlow Demo</string> </resources> diff --git a/tensorflow/examples/label_image/BUILD b/tensorflow/examples/label_image/BUILD index facc61ae130..021372fa7b8 100644 --- a/tensorflow/examples/label_image/BUILD +++ b/tensorflow/examples/label_image/BUILD @@ -1,5 +1,5 @@ # Description: -# Tensorflow C++ inference example for labeling images. +# TensorFlow C++ inference example for labeling images. package(default_visibility = ["//tensorflow:internal"]) diff --git a/tensorflow/examples/label_image/README.md b/tensorflow/examples/label_image/README.md index 1f40e8bef0d..e427ff78453 100644 --- a/tensorflow/examples/label_image/README.md +++ b/tensorflow/examples/label_image/README.md @@ -1,4 +1,4 @@ -# Tensorflow C++ Image Recognition Demo +# TensorFlow C++ Image Recognition Demo This example shows how you can load a pre-trained TensorFlow network and use it to recognize objects in images. diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.SparseTensor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.SparseTensor.md index f945e714e67..f8be4c5e268 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.SparseTensor.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard3/tf.SparseTensor.md @@ -1,6 +1,6 @@ Represents a sparse tensor. -Tensorflow represents a sparse tensor as three separate dense tensors: +TensorFlow represents a sparse tensor as three separate dense tensors: `indices`, `values`, and `shape`. In Python, the three tensors are collected into a `SparseTensor` class for ease of use. If you have separate `indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor` diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.Supervisor.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.Supervisor.md index b3d17eac2db..6631412c756 100644 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.Supervisor.md +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard6/tf.train.Supervisor.md @@ -1,7 +1,7 @@ A training helper that checkpoints models and computes summaries. The Supervisor is a small wrapper around a `Coordinator`, a `Saver`, -and a `SessionManager` that takes care of common needs of Tensorflow +and a `SessionManager` that takes care of common needs of TensorFlow training programs. #### Use for a single program @@ -11,7 +11,7 @@ with tf.Graph().as_default(): ...add operations to the graph... 
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'. sv = Supervisor(logdir='/tmp/mydir') - # Get a Tensorflow session managed by the supervisor. + # Get a TensorFlow session managed by the supervisor. with sv.managed_session(FLAGS.master) as sess: # Use the session to train the graph. while not sv.should_stop(): diff --git a/tensorflow/g3doc/api_docs/python/image.md b/tensorflow/g3doc/api_docs/python/image.md index 39b129f38e6..8aa6ff9f5a0 100644 --- a/tensorflow/g3doc/api_docs/python/image.md +++ b/tensorflow/g3doc/api_docs/python/image.md @@ -784,7 +784,7 @@ Internally, images are either stored in as one `float32` per channel per pixel (implicitly, values are assumed to lie in `[0,1)`) or one `uint8` per channel per pixel (values are assumed to lie in `[0,255]`). -Tensorflow can convert between images in RGB or HSV. The conversion functions +TensorFlow can convert between images in RGB or HSV. The conversion functions work only on float images, so you need to convert images in other formats using [`convert_image_dtype`](#convert-image-dtype). diff --git a/tensorflow/g3doc/api_docs/python/sparse_ops.md b/tensorflow/g3doc/api_docs/python/sparse_ops.md index 1665420b5bb..a1d9d23bea7 100644 --- a/tensorflow/g3doc/api_docs/python/sparse_ops.md +++ b/tensorflow/g3doc/api_docs/python/sparse_ops.md @@ -9,7 +9,7 @@ Note: Functions taking `Tensor` arguments can also take anything accepted by ## Sparse Tensor Representation -Tensorflow supports a `SparseTensor` representation for data that is sparse +TensorFlow supports a `SparseTensor` representation for data that is sparse in multiple dimensions. Contrast this representation with `IndexedSlices`, which is efficient for representing tensors that are sparse in their first dimension, and dense along all other dimensions. @@ -20,7 +20,7 @@ dimension, and dense along all other dimensions. Represents a sparse tensor. -Tensorflow represents a sparse tensor as three separate dense tensors: +TensorFlow represents a sparse tensor as three separate dense tensors: `indices`, `values`, and `shape`. In Python, the three tensors are collected into a `SparseTensor` class for ease of use. If you have separate `indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor` diff --git a/tensorflow/g3doc/api_docs/python/train.md b/tensorflow/g3doc/api_docs/python/train.md index 4f63fa0b7c1..8a6cdb3ca71 100644 --- a/tensorflow/g3doc/api_docs/python/train.md +++ b/tensorflow/g3doc/api_docs/python/train.md @@ -1715,7 +1715,7 @@ This method currently blocks forever. A training helper that checkpoints models and computes summaries. The Supervisor is a small wrapper around a `Coordinator`, a `Saver`, -and a `SessionManager` that takes care of common needs of Tensorflow +and a `SessionManager` that takes care of common needs of TensorFlow training programs. #### Use for a single program @@ -1725,7 +1725,7 @@ with tf.Graph().as_default(): ...add operations to the graph... # Create a Supervisor that will checkpoint the model in '/tmp/mydir'. sv = Supervisor(logdir='/tmp/mydir') - # Get a Tensorflow session managed by the supervisor. + # Get a TensorFlow session managed by the supervisor. with sv.managed_session(FLAGS.master) as sess: # Use the session to train the graph. 
while not sv.should_stop(): diff --git a/tensorflow/g3doc/get_started/index.md b/tensorflow/g3doc/get_started/index.md index 32aabcb027c..d8dc20d9d9b 100644 --- a/tensorflow/g3doc/get_started/index.md +++ b/tensorflow/g3doc/get_started/index.md @@ -18,7 +18,7 @@ x_data = np.random.rand(100).astype(np.float32) y_data = x_data * 0.1 + 0.3 # Try to find values for W and b that compute y_data = W * x_data + b -# (We know that W should be 0.1 and b 0.3, but Tensorflow will +# (We know that W should be 0.1 and b 0.3, but TensorFlow will # figure that out for us.) W = tf.Variable(tf.random_uniform([1], -1.0, 1.0)) b = tf.Variable(tf.zeros([1])) diff --git a/tensorflow/g3doc/how_tos/adding_an_op/index.md b/tensorflow/g3doc/how_tos/adding_an_op/index.md index 7fc5e787019..59cab6329a2 100644 --- a/tensorflow/g3doc/how_tos/adding_an_op/index.md +++ b/tensorflow/g3doc/how_tos/adding_an_op/index.md @@ -927,7 +927,7 @@ There are several ways to preserve backwards-compatibility. 5. Namespace any new Ops you create, by prefixing the Op names with something unique to your project. This avoids having your Op colliding with any Ops - that might be included in future versions of Tensorflow. + that might be included in future versions of TensorFlow. 6. Plan ahead! Try to anticipate future uses for the Op. Some signature changes can't be done in a compatible way (for example, making a list of the same diff --git a/tensorflow/g3doc/how_tos/documentation/index.md b/tensorflow/g3doc/how_tos/documentation/index.md index c38f4fed441..cebd4115d01 100755 --- a/tensorflow/g3doc/how_tos/documentation/index.md +++ b/tensorflow/g3doc/how_tos/documentation/index.md @@ -318,7 +318,7 @@ or class docstring where the Ops constructors are called out. Here's an example from the module docsting in `image_ops.py`: - Tensorflow can convert between images in RGB or HSV. The conversion + TensorFlow can convert between images in RGB or HSV. The conversion functions work only on `float` images, so you need to convert images in other formats using [`convert_image_dtype`](#convert-image-dtype). diff --git a/tensorflow/g3doc/how_tos/image_retraining/index.md b/tensorflow/g3doc/how_tos/image_retraining/index.md index 8ebbe57af1e..60de27d36be 100644 --- a/tensorflow/g3doc/how_tos/image_retraining/index.md +++ b/tensorflow/g3doc/how_tos/image_retraining/index.md @@ -159,7 +159,7 @@ You should see a list of flower labels, in most cases with daisy on top `--image` parameter with your own images to try those out, and use the C++ code as a template to integrate with your own applications. -If you'd like to use the retrained model in a Python program [this example from @eldor4do shows what you'll need to do](https://github.com/eldor4do/Tensorflow-Examples/blob/master/retraining-example.py). +If you'd like to use the retrained model in a Python program [this example from @eldor4do shows what you'll need to do](https://github.com/eldor4do/TensorFlow-Examples/blob/master/retraining-example.py). ## Training on Your Own Categories diff --git a/tensorflow/g3doc/how_tos/index.md b/tensorflow/g3doc/how_tos/index.md index ae99548abd3..c1f92b785e2 100644 --- a/tensorflow/g3doc/how_tos/index.md +++ b/tensorflow/g3doc/how_tos/index.md @@ -69,8 +69,8 @@ compose in your graph, but here are the details of how to add you own custom Op. 
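The adding-an-op hunk above (point 5) recommends prefixing new Op names with something unique to your project. A minimal sketch of what that prefix looks like at registration time, modeled on the `ZeroOut` example from that guide; `Acme` is a made-up project prefix:

```cpp
// Illustrative registration: the "Acme" prefix is hypothetical and keeps the
// Op name from colliding with Ops added in future TensorFlow releases.
#include "tensorflow/core/framework/op.h"

REGISTER_OP("AcmeZeroOut")
    .Input("to_zero: int32")
    .Output("zeroed: int32");
```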
## How to write TensorFlow code -Tensorflow Style Guide is set of style decisions that both developers -and users of Tensorflow should follow to increase the readability of their code, +TensorFlow Style Guide is set of style decisions that both developers +and users of TensorFlow should follow to increase the readability of their code, reduce the number of errors, and promote consistency. [View Style Guide](style_guide.md) diff --git a/tensorflow/g3doc/how_tos/style_guide.md b/tensorflow/g3doc/how_tos/style_guide.md index a585d849845..095a00cd08b 100644 --- a/tensorflow/g3doc/how_tos/style_guide.md +++ b/tensorflow/g3doc/how_tos/style_guide.md @@ -1,6 +1,6 @@ # TensorFlow Style Guide -This page contains style decisions that both developers and users of Tensorflow +This page contains style decisions that both developers and users of TensorFlow should follow to increase the readability of their code, reduce the number of errors, and promote consistency. diff --git a/tensorflow/g3doc/tutorials/mnist/pros/index.md b/tensorflow/g3doc/tutorials/mnist/pros/index.md index 324a29c02eb..aa3ae13fa6c 100644 --- a/tensorflow/g3doc/tutorials/mnist/pros/index.md +++ b/tensorflow/g3doc/tutorials/mnist/pros/index.md @@ -36,7 +36,7 @@ will use below. ### Start TensorFlow InteractiveSession -Tensorflow relies on a highly efficient C++ backend to do its computation. The +TensorFlow relies on a highly efficient C++ backend to do its computation. The connection to this backend is called a session. The common usage for TensorFlow programs is to first create a graph and then launch it in a session. diff --git a/tensorflow/models/embedding/README.md b/tensorflow/models/embedding/README.md index d3a5bcda05f..6c22ac045ea 100644 --- a/tensorflow/models/embedding/README.md +++ b/tensorflow/models/embedding/README.md @@ -43,7 +43,7 @@ Here is a short overview of what is in this directory. File | What's in it? --- | --- -`word2vec.py` | A version of word2vec implemented using Tensorflow ops and minibatching. +`word2vec.py` | A version of word2vec implemented using TensorFlow ops and minibatching. `word2vec_test.py` | Integration test for word2vec. `word2vec_optimized.py` | A version of word2vec implemented using C ops that does no minibatching. `word2vec_optimized_test.py` | Integration test for word2vec_optimized. diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py index b17be16641c..67dbe40810f 100644 --- a/tensorflow/python/framework/ops.py +++ b/tensorflow/python/framework/ops.py @@ -895,7 +895,7 @@ IndexedSlicesValue = collections.namedtuple( class SparseTensor(object): """Represents a sparse tensor. - Tensorflow represents a sparse tensor as three separate dense tensors: + TensorFlow represents a sparse tensor as three separate dense tensors: `indices`, `values`, and `shape`. In Python, the three tensors are collected into a `SparseTensor` class for ease of use. If you have separate `indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor` diff --git a/tensorflow/python/kernel_tests/attention_ops_test.py b/tensorflow/python/kernel_tests/attention_ops_test.py index 605d485c86f..cbb90f3e8f3 100644 --- a/tensorflow/python/kernel_tests/attention_ops_test.py +++ b/tensorflow/python/kernel_tests/attention_ops_test.py @@ -83,7 +83,7 @@ class ExtractGlimpseTest(tf.test.TestCase): glimpse_cols = (tf.transpose( tf.image.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3])) - # Evaluate the Tensorflow Graph. + # Evaluate the TensorFlow Graph. 
with self.test_session() as sess: value_rows, value_cols = sess.run([glimpse_rows, glimpse_cols]) diff --git a/tensorflow/python/ops/image_ops.py b/tensorflow/python/ops/image_ops.py index 0d3c68d2e73..37d9d9d7dd2 100644 --- a/tensorflow/python/ops/image_ops.py +++ b/tensorflow/python/ops/image_ops.py @@ -106,7 +106,7 @@ Internally, images are either stored in as one `float32` per channel per pixel (implicitly, values are assumed to lie in `[0,1)`) or one `uint8` per channel per pixel (values are assumed to lie in `[0,255]`). -Tensorflow can convert between images in RGB or HSV. The conversion functions +TensorFlow can convert between images in RGB or HSV. The conversion functions work only on float images, so you need to convert images in other formats using [`convert_image_dtype`](#convert-image-dtype). diff --git a/tensorflow/python/ops/sparse_ops.py b/tensorflow/python/ops/sparse_ops.py index b5877d27423..00b41b386d0 100644 --- a/tensorflow/python/ops/sparse_ops.py +++ b/tensorflow/python/ops/sparse_ops.py @@ -16,7 +16,7 @@ # pylint: disable=g-short-docstring-punctuation """## Sparse Tensor Representation -Tensorflow supports a `SparseTensor` representation for data that is sparse +TensorFlow supports a `SparseTensor` representation for data that is sparse in multiple dimensions. Contrast this representation with `IndexedSlices`, which is efficient for representing tensors that are sparse in their first dimension, and dense along all other dimensions. diff --git a/tensorflow/python/platform/base.i b/tensorflow/python/platform/base.i index 3c538a5f9dc..99aae3b2416 100644 --- a/tensorflow/python/platform/base.i +++ b/tensorflow/python/platform/base.i @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -// Helper macros and typemaps for use in Tensorflow swig files. +// Helper macros and typemaps for use in TensorFlow swig files. // %{ #include <memory> diff --git a/tensorflow/python/summary/event_accumulator.py b/tensorflow/python/summary/event_accumulator.py index fffe05f41ba..5c5ce00508f 100644 --- a/tensorflow/python/summary/event_accumulator.py +++ b/tensorflow/python/summary/event_accumulator.py @@ -572,7 +572,7 @@ class EventAccumulator(object): If by_tags is True, purge all events that occurred after the given event.step, but only for the tags that the event has. Non-sequential - event.steps suggest that a Tensorflow restart occurred, and we discard + event.steps suggest that a TensorFlow restart occurred, and we discard the out-of-order events to display a consistent view in TensorBoard. Discarding by tags is the safer method, when we are unsure whether a restart diff --git a/tensorflow/python/training/supervisor.py b/tensorflow/python/training/supervisor.py index f07cc8291da..35505b82870 100644 --- a/tensorflow/python/training/supervisor.py +++ b/tensorflow/python/training/supervisor.py @@ -41,7 +41,7 @@ class Supervisor(object): """A training helper that checkpoints models and computes summaries. The Supervisor is a small wrapper around a `Coordinator`, a `Saver`, - and a `SessionManager` that takes care of common needs of Tensorflow + and a `SessionManager` that takes care of common needs of TensorFlow training programs. #### Use for a single program @@ -51,7 +51,7 @@ class Supervisor(object): ...add operations to the graph... # Create a Supervisor that will checkpoint the model in '/tmp/mydir'. 
sv = Supervisor(logdir='/tmp/mydir') - # Get a Tensorflow session managed by the supervisor. + # Get a TensorFlow session managed by the supervisor. with sv.managed_session(FLAGS.master) as sess: # Use the session to train the graph. while not sv.should_stop(): diff --git a/tensorflow/tensorboard/http_api.md b/tensorflow/tensorboard/http_api.md index 0e5744c608c..1ca5a48037a 100644 --- a/tensorflow/tensorboard/http_api.md +++ b/tensorflow/tensorboard/http_api.md @@ -327,7 +327,7 @@ proto. For example: ## Notes All returned values, histograms, audio, and images are returned in the order -they were written by Tensorflow (which should correspond to increasing +they were written by TensorFlow (which should correspond to increasing `wall_time` order, but may not necessarily correspond to increasing step count if the process had to restart from a previous checkpoint). diff --git a/tensorflow/tools/benchmark/BUILD b/tensorflow/tools/benchmark/BUILD index 5a7981e1127..0322ea766d4 100644 --- a/tensorflow/tools/benchmark/BUILD +++ b/tensorflow/tools/benchmark/BUILD @@ -65,7 +65,7 @@ tf_cc_test( # # NOTE: currently '-pthread' must be removed from the LINK_OPTS variable # in @protobuf//:BUILD to sucessfully build for Android. This is temporary -# pending an update of the version of the protobuf library that Tensorflow +# pending an update of the version of the protobuf library that TensorFlow # uses. cc_binary( name = "benchmark_model", diff --git a/tensorflow/tools/benchmark/README.md b/tensorflow/tools/benchmark/README.md index bcfed4ff142..4cd5c7c5242 100644 --- a/tensorflow/tools/benchmark/README.md +++ b/tensorflow/tools/benchmark/README.md @@ -1,4 +1,4 @@ -# Tensorflow Model Benchmark Tool +# TensorFlow Model Benchmark Tool ## Description diff --git a/tensorflow/tools/benchmark/benchmark_model.cc b/tensorflow/tools/benchmark/benchmark_model.cc index e23fd7d189e..ccb579fd03d 100644 --- a/tensorflow/tools/benchmark/benchmark_model.cc +++ b/tensorflow/tools/benchmark/benchmark_model.cc @@ -48,7 +48,7 @@ namespace benchmark_model { Status InitializeSession(int num_threads, const string& graph, std::unique_ptr<Session>* session, std::unique_ptr<StatSummarizer>* stats) { - LOG(INFO) << "Loading Tensorflow."; + LOG(INFO) << "Loading TensorFlow."; tensorflow::SessionOptions options; tensorflow::ConfigProto& config = options.config; @@ -61,7 +61,7 @@ Status InitializeSession(int num_threads, const string& graph, tensorflow::GraphDef tensorflow_graph; Status s = ReadBinaryProto(Env::Default(), graph, &tensorflow_graph); if (!s.ok()) { - LOG(ERROR) << "Could not create Tensorflow Graph: " << s; + LOG(ERROR) << "Could not create TensorFlow Graph: " << s; return s; } @@ -69,7 +69,7 @@ Status InitializeSession(int num_threads, const string& graph, s = (*session)->Create(tensorflow_graph); if (!s.ok()) { - LOG(ERROR) << "Could not create Tensorflow Session: " << s; + LOG(ERROR) << "Could not create TensorFlow Session: " << s; return s; } diff --git a/tensorflow/tools/ci_build/README.md b/tensorflow/tools/ci_build/README.md index 5160adebdb5..85a43f3d206 100644 --- a/tensorflow/tools/ci_build/README.md +++ b/tensorflow/tools/ci_build/README.md @@ -1,4 +1,4 @@ -# Tensorflow Builds +# TensorFlow Builds This directory contains all the files and setup instructions to run all the important builds and tests. **You can trivially run it yourself!** It also @@ -75,7 +75,7 @@ for incoming gerrit changes. Gpu tests and benchmark are coming soon. 
Check -## How Does Tensorflow Continuous Integration Work +## How Does TensorFlow Continuous Integration Work We use [jenkins](https://jenkins-ci.org/) as our continuous integration. It is running at [ci.tensorflow.org](http://ci.tensorflow.org). diff --git a/tensorflow/tools/dist_test/remote_test.sh b/tensorflow/tools/dist_test/remote_test.sh index 96add822ed3..b6625724182 100755 --- a/tensorflow/tools/dist_test/remote_test.sh +++ b/tensorflow/tools/dist_test/remote_test.sh @@ -45,7 +45,7 @@ # If any of the following environment variable has non-empty values, it will # be mapped into the docker container to override the default values (see # dist_test.sh) -# TF_DIST_GRPC_SERVER_URL: URL to an existing Tensorflow GRPC server. +# TF_DIST_GRPC_SERVER_URL: URL to an existing TensorFlow GRPC server. # If set to any non-empty and valid value (e.g., # grpc://1.2.3.4:2222), it will cause the test # to bypass the k8s cluster setup and diff --git a/tensorflow/tools/dist_test/scripts/k8s_tensorflow.py b/tensorflow/tools/dist_test/scripts/k8s_tensorflow.py index 3cbaaff32fa..3a427a1d4e6 100755 --- a/tensorflow/tools/dist_test/scripts/k8s_tensorflow.py +++ b/tensorflow/tools/dist_test/scripts/k8s_tensorflow.py @@ -14,7 +14,7 @@ # limitations under the License. # ============================================================================== -"""Generates YAML configuration files for distributed Tensorflow workers. +"""Generates YAML configuration files for distributed TensorFlow workers. The workers will be run in a Kubernetes (k8s) container cluster. """ diff --git a/tensorflow/tools/proto_text/gen_proto_text_functions.cc b/tensorflow/tools/proto_text/gen_proto_text_functions.cc index dd1df323081..cc664bb90b7 100644 --- a/tensorflow/tools/proto_text/gen_proto_text_functions.cc +++ b/tensorflow/tools/proto_text/gen_proto_text_functions.cc @@ -37,7 +37,7 @@ class CrashOnErrorCollector } }; -static const char kTensorflowHeaderPrefix[] = ""; +static const char kTensorFlowHeaderPrefix[] = ""; static const char kPlaceholderFile[] = "tensorflow/tools/proto_text/placeholder.txt"; @@ -77,7 +77,7 @@ int MainImpl(int argc, char** argv) { } const string output_root = argv[1]; - const string output_relative_path = kTensorflowHeaderPrefix + string(argv[2]); + const string output_relative_path = kTensorFlowHeaderPrefix + string(argv[2]); string src_relative_path; bool has_placeholder = false; @@ -114,7 +114,7 @@ int MainImpl(int argc, char** argv) { proto_path_no_suffix.substr(output_relative_path.size()); const auto code = - tensorflow::GetProtoTextFunctionCode(*fd, kTensorflowHeaderPrefix); + tensorflow::GetProtoTextFunctionCode(*fd, kTensorFlowHeaderPrefix); // Three passes, one for each output file. for (int pass = 0; pass < 3; ++pass) {
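The `remote_test.sh` hunk above documents `TF_DIST_GRPC_SERVER_URL` as a way to point the test at an existing TensorFlow GRPC server such as `grpc://1.2.3.4:2222`. On the client side, a hedged C++ sketch of targeting such a server sets `SessionOptions::target` before creating the session (the address below is the script's own placeholder value, and the function name is illustrative):

```cpp
// Illustrative sketch: create a session whose target is an existing
// TensorFlow GRPC server, e.g. the TF_DIST_GRPC_SERVER_URL placeholder
// grpc://1.2.3.4:2222 from remote_test.sh.
#include <memory>
#include <string>

#include "tensorflow/core/public/session.h"

tensorflow::Status ConnectToGrpcServer(
    const std::string& server_url,
    std::unique_ptr<tensorflow::Session>* session) {
  tensorflow::SessionOptions options;
  options.target = server_url;  // e.g. "grpc://1.2.3.4:2222"
  tensorflow::Session* raw_session = nullptr;
  tensorflow::Status s = tensorflow::NewSession(options, &raw_session);
  if (s.ok()) {
    session->reset(raw_session);
  }
  return s;
}
```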