s/Tensorflow/TensorFlow. A losing battle :)

Change: 127324936
Authored by Vijay Vasudevan on 2016-07-13 08:25:54 -08:00; committed by TensorFlower Gardener
Parent: 4f6e9efb40
Commit: c87a7ca311
48 changed files with 94 additions and 94 deletions

@@ -49,7 +49,7 @@ Change to your working directory:
 Where *C:\Path\to* is the path to your real working directory.
-Create a folder where Tensorflow headers/libraries/binaries will be installed
+Create a folder where TensorFlow headers/libraries/binaries will be installed
 after they are built:
 C:\Path\to>mkdir install
@@ -83,7 +83,7 @@ Go to the project folder:
 C:\Path\to>cd tensorflow
 C:\Path\to\tensorflow>
-Now go to *tensorflow\contrib\cmake* folder in Tensorflow's contrib sources:
+Now go to *tensorflow\contrib\cmake* folder in TensorFlow's contrib sources:
 C:\Path\to\tensorflow>cd tensorflow\contrib\cmake
 C:\Path\to\tensorflow\tensorflow\contrib\cmake>
@@ -101,7 +101,7 @@ and
 [Visual Studio](http://www.cmake.org/cmake/help/latest/manual/cmake-generators.7.html#visual-studio-generators)
 generators.
-We will use shadow building to separate the temporary files from the Tensorflow
+We will use shadow building to separate the temporary files from the TensorFlow
 source code.
 Create a temporary *build* folder and change your working directory to it:
@@ -143,7 +143,7 @@ It will generate *Visual Studio* solution file *tensorflow.sln* in current
 directory.
 If the *gmock* directory does not exist, and/or you do not want to build
-Tensorflow unit tests, you need to add *cmake* command argument
+TensorFlow unit tests, you need to add *cmake* command argument
 `-Dtensorflow_BUILD_TESTS=OFF` to disable testing.
 Compiling
@@ -219,7 +219,7 @@ If all tests are passed, safely continue.
 Installing
 ==========
-To install Tensorflow to the specified *install* folder:
+To install TensorFlow to the specified *install* folder:
 [...]\contrib\cmake\build\release>nmake install
@@ -232,7 +232,7 @@ It sounds not so strange and it works.
 This will create the following folders under the *install* location:
 * bin - that contains tensorflow binaries;
-* include - that contains C++ headers and Tensorflow *.proto files;
+* include - that contains C++ headers and TensorFlow *.proto files;
 * lib - that contains linking libraries and *CMake* configuration files for
 *tensorflow* package.
@@ -251,7 +251,7 @@ should link against release libtensorflow.lib library.
 DLLs vs. static linking
 =======================
-Static linking is now the default for the Tensorflow Buffer libraries. Due to
+Static linking is now the default for the TensorFlow Buffer libraries. Due to
 issues with Win32's use of a separate heap for each DLL, as well as binary
 compatibility issues between different versions of MSVC's STL library, it is
 recommended that you use static linkage only. However, it is possible to
@@ -270,7 +270,7 @@ compatibility between releases, so it is likely that future versions of these
 libraries will *not* be usable as drop-in replacements.
 If your project is itself a DLL intended for use by third-party software, we
-recommend that you do NOT expose Tensorflow objects in your library's
+recommend that you do NOT expose TensorFlow objects in your library's
 public interface, and that you statically link them into your library.
 Notes on Compiler Warnings

@@ -123,7 +123,7 @@ tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
 tensorflow::Session* session_pointer = nullptr;
 tensorflow::Status session_status = tensorflow::NewSession(options, &session_pointer);
 if (!session_status.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Session: " << session_status;
+LOG(ERROR) << "Could not create TensorFlow Session: " << session_status;
 return session_status;
 }
 session->reset(session_pointer);
@@ -149,7 +149,7 @@ tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
 LOG(INFO) << "Creating session.";
 tensorflow::Status create_status = (*session)->Create(tensorflow_graph);
 if (!create_status.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Graph: " << create_status;
+LOG(ERROR) << "Could not create TensorFlow Graph: " << create_status;
 return create_status;
 }

@@ -157,7 +157,7 @@ NSString* RunInferenceOnImage() {
 LOG(INFO) << "Creating session.";
 tensorflow::Status s = session->Create(tensorflow_graph);
 if (!s.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Graph: " << s;
+LOG(ERROR) << "Could not create TensorFlow Graph: " << s;
 return @"";
 }

@@ -224,7 +224,7 @@ def get_autoencoder_model(hidden_units, target_predictor_fn,
 return dnn_autoencoder_estimator
-## This will be in Tensorflow 0.7.
+## This will be in TensorFlow 0.7.
 ## TODO(ilblackdragon): Clean this up when it's released
@@ -328,7 +328,7 @@ def bidirectional_rnn(cell_fw,
 return outputs, array_ops_.concat(1, [state_fw, state_bw])
-# End of Tensorflow 0.7
+# End of TensorFlow 0.7
 def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,

@@ -1,4 +1,4 @@
-# Description: Tensorflow Serving session bundle.
+# Description: TensorFlow Serving session bundle.
 package(
 default_visibility = ["//visibility:public"],

@@ -40,7 +40,7 @@ These exports have the following properties,
 ## Python exporting code
 The [`Exporter`](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/session_bundle/exporter.py)
-class can be used to export a model in the above format from a Tensorflow python
+class can be used to export a model in the above format from a TensorFlow python
 binary.
 ## C++ initialization code

@@ -1,4 +1,4 @@
-# Description: Tensorflow Serving session_bundle example.
+# Description: TensorFlow Serving session_bundle example.
 package(
 default_visibility = ["//tensorflow/contrib/session_bundle:__subpackages__"],
@@ -13,12 +13,12 @@ exports_files(["LICENSE"])
 filegroup(
 name = "all_files",
 srcs = glob(
 ["**/*"],
 exclude = [
 "**/METADATA",
 "**/OWNERS",
 "g3doc/sitemap.md",
 ],
 ),
 visibility = ["//visibility:public"],
 )
@@ -26,27 +26,27 @@ filegroup(
 py_binary(
 name = "export_half_plus_two",
 srcs = [
 "export_half_plus_two.py",
 ],
 srcs_version = "PY2AND3",
 deps = [
 "//tensorflow:tensorflow_py",
 "//tensorflow/contrib/session_bundle:exporter",
 ],
 )
 genrule(
 name = "half_plus_two",
 outs = [
 "half_plus_two/00000123/export.meta",
 "half_plus_two/00000123/export-00000-of-00001",
 ],
 cmd =
 "rm -rf /tmp/half_plus_two; " +
 "$(PYTHON_BIN_PATH) $(locations :export_half_plus_two); " +
 "cp -r /tmp/half_plus_two/* $(@D)/half_plus_two",
 tools = [
 ":export_half_plus_two",
 ],
 visibility = ["//visibility:public"],
 )

@@ -124,7 +124,7 @@ void BasicTest(const string& export_path) {
 outputs[0], test::AsTensor<float>({2, 2.5, 3, 3.5}, TensorShape({4, 1})));
 }
-TEST(LoadSessionBundleFromPath, BasicTensorflowContrib) {
+TEST(LoadSessionBundleFromPath, BasicTensorFlowContrib) {
 const string export_path = test_util::TestSrcDirPath(
 "session_bundle/example/half_plus_two/00000123");
 BasicTest(export_path);

@@ -271,7 +271,7 @@ class Image(ItemHandler):
 class TFExampleDecoder(data_decoder.DataDecoder):
-"""A decoder for Tensorflow Examples.
+"""A decoder for TensorFlow Examples.
 Decoding Example proto buffers is comprised of two stages: (1) Example parsing
 and (2) tensor manipulation.
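As an aside on the two stages named in that docstring, here is a minimal, hypothetical sketch of stage (1), parsing a serialized `Example` proto with core TensorFlow ops (the feature keys and shapes are made up; 2016-era graph-mode APIs are assumed):

```python
import tensorflow as tf

# A scalar string tensor holding one serialized tf.Example record,
# e.g. as produced by a TFRecord reader.
serialized_example = tf.placeholder(tf.string, shape=[])

# Stage (1): Example parsing -- map feature keys to typed tensors.
features = tf.parse_single_example(
    serialized_example,
    features={
        "image/encoded": tf.FixedLenFeature([], tf.string),    # hypothetical keys
        "image/class/label": tf.FixedLenFeature([], tf.int64),
    })

# Stage (2): tensor manipulation -- e.g. decode the raw bytes into an image.
image = tf.image.decode_jpeg(features["image/encoded"], channels=3)
label = features["image/class/label"]
```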

@@ -181,7 +181,7 @@ def evaluation(sess,
 written out using a summary writer.
 Args:
-sess: The current Tensorflow `Session`.
+sess: The current TensorFlow `Session`.
 num_evals: The number of times to execute `eval_op`.
 init_op: An operation run at the beginning of evaluation.
 init_op_feed_dict: A feed dictionary to use when executing `init_op`.

@@ -1,4 +1,4 @@
-# Tensorflow code for training random forests.
+# TensorFlow code for training random forests.
 licenses(["notice"]) # Apache 2.0
 exports_files(["LICENSE"])

@@ -627,7 +627,7 @@ cc_library(
 alwayslink = 1,
 )
-# Full Tensorflow library with operator support. Use this unless reducing
+# Full TensorFlow library with operator support. Use this unless reducing
 # binary size (by packaging a reduced operator set) is a concern.
 cc_library(
 name = "android_tensorflow_lib",
@@ -706,7 +706,7 @@ filegroup(
 visibility = ["//visibility:public"],
 )
-# Portable library providing testing functionality for Tensorflow.
+# Portable library providing testing functionality for TensorFlow.
 cc_library(
 name = "android_tensorflow_test_lib",
 testonly = 1,

@@ -15,7 +15,7 @@ limitations under the License.
 // A set of lightweight wrappers which simplify access to Example features.
 //
-// Tensorflow Example proto uses associative maps on top of oneof fields.
+// TensorFlow Example proto uses associative maps on top of oneof fields.
 // So accessing feature values is not very convenient.
 //
 // For example, to read a first value of integer feature "tag":

@@ -1,5 +1,5 @@
 # Description:
-# Tensorflow camera demo app for Android.
+# TensorFlow camera demo app for Android.
 package(default_visibility = ["//visibility:public"])

@@ -1,6 +1,6 @@
-# Tensorflow Android Camera Demo
-This folder contains a simple camera-based demo application utilizing Tensorflow.
+# TensorFlow Android Camera Demo
+This folder contains a simple camera-based demo application utilizing TensorFlow.
 ## Description
@@ -76,5 +76,5 @@ errors may not be obvious if the app halts immediately, so if you installed
 with bazel and the app doesn't come up, then the easiest thing to do is try
 installing with adb.
-Once the app is installed it will be named "Tensorflow Demo" and have the orange
-Tensorflow logo as its icon.
+Once the app is installed it will be named "TensorFlow Demo" and have the orange
+TensorFlow logo as its icon.

@@ -41,7 +41,7 @@ limitations under the License.
 using namespace tensorflow;
-// Global variables that holds the Tensorflow classifier.
+// Global variables that holds the TensorFlow classifier.
 static std::unique_ptr<tensorflow::Session> session;
 static std::vector<std::string> g_label_strings;
@@ -85,7 +85,7 @@ inline static int64 CurrentThreadTimeUs() {
 return tv.tv_sec * 1000000 + tv.tv_usec;
 }
-JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)(
+JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorFlow)(
 JNIEnv* env, jobject thiz, jobject java_asset_manager, jstring model,
 jstring labels, jint num_classes, jint model_input_size, jint image_mean,
 jfloat image_std, jstring input_name, jstring output_name) {
@@ -112,7 +112,7 @@ JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)(
 g_output_name.reset(
 new std::string(env->GetStringUTFChars(output_name, NULL)));
-LOG(INFO) << "Loading Tensorflow.";
+LOG(INFO) << "Loading TensorFlow.";
 LOG(INFO) << "Making new SessionOptions.";
 tensorflow::SessionOptions options;
@@ -137,12 +137,12 @@ JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)(
 LOG(INFO) << "Creating session.";
 tensorflow::Status s = session->Create(tensorflow_graph);
 if (!s.ok()) {
-LOG(FATAL) << "Could not create Tensorflow Graph: " << s;
+LOG(FATAL) << "Could not create TensorFlow Graph: " << s;
 }
 // Clear the proto to save memory space.
 tensorflow_graph.Clear();
-LOG(INFO) << "Tensorflow graph loaded from: " << model_cstr;
+LOG(INFO) << "TensorFlow graph loaded from: " << model_cstr;
 // Read the label list
 ReadFileToVector(asset_manager, labels_cstr, &g_label_strings);
@@ -237,7 +237,7 @@ static std::string ClassifyImage(const RGBA* const bitmap_src) {
 auto input_tensor_mapped = input_tensor.tensor<float, 4>();
-LOG(INFO) << "Tensorflow: Copying Data.";
+LOG(INFO) << "TensorFlow: Copying Data.";
 for (int i = 0; i < g_tensorflow_input_size; ++i) {
 const RGBA* src = bitmap_src + i * g_tensorflow_input_size;
 for (int j = 0; j < g_tensorflow_input_size; ++j) {

@@ -14,8 +14,8 @@ limitations under the License.
 ==============================================================================*/
 // The methods are exposed to Java to allow for interaction with the native
-// Tensorflow code. See
-// tensorflow/examples/android/src/org/tensorflow/TensorflowClassifier.java
+// TensorFlow code. See
+// tensorflow/examples/android/src/org/tensorflow/TensorFlowClassifier.java
 // for the Java counterparts.
 #ifndef ORG_TENSORFLOW_JNI_TENSORFLOW_JNI_H_ // NOLINT
@@ -28,9 +28,9 @@ extern "C" {
 #endif // __cplusplus
 #define TENSORFLOW_METHOD(METHOD_NAME) \
-Java_org_tensorflow_demo_TensorflowClassifier_##METHOD_NAME // NOLINT
-JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)(
+Java_org_tensorflow_demo_TensorFlowClassifier_##METHOD_NAME // NOLINT
+JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorFlow)(
 JNIEnv* env, jobject thiz, jobject java_asset_manager, jstring model,
 jstring labels, jint num_classes, jint model_input_size, jint image_mean,
 jfloat image_std, jstring input_name, jstring output_name);

@@ -16,5 +16,5 @@
 -->
 <resources>
-<string name="app_name">Tensorflow Demo</string>
+<string name="app_name">TensorFlow Demo</string>
 </resources>

@@ -1,5 +1,5 @@
 # Description:
-# Tensorflow C++ inference example for labeling images.
+# TensorFlow C++ inference example for labeling images.
 package(default_visibility = ["//tensorflow:internal"])

@@ -1,4 +1,4 @@
-# Tensorflow C++ Image Recognition Demo
+# TensorFlow C++ Image Recognition Demo
 This example shows how you can load a pre-trained TensorFlow network and use it
 to recognize objects in images.

@@ -1,6 +1,6 @@
 Represents a sparse tensor.
-Tensorflow represents a sparse tensor as three separate dense tensors:
+TensorFlow represents a sparse tensor as three separate dense tensors:
 `indices`, `values`, and `shape`. In Python, the three tensors are
 collected into a `SparseTensor` class for ease of use. If you have separate
 `indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor`

@@ -1,7 +1,7 @@
 A training helper that checkpoints models and computes summaries.
 The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
-and a `SessionManager` that takes care of common needs of Tensorflow
+and a `SessionManager` that takes care of common needs of TensorFlow
 training programs.
 #### Use for a single program
@@ -11,7 +11,7 @@ with tf.Graph().as_default():
 ...add operations to the graph...
 # Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
 sv = Supervisor(logdir='/tmp/mydir')
-# Get a Tensorflow session managed by the supervisor.
+# Get a TensorFlow session managed by the supervisor.
 with sv.managed_session(FLAGS.master) as sess:
 # Use the session to train the graph.
 while not sv.should_stop():

@@ -784,7 +784,7 @@ Internally, images are either stored in as one `float32` per channel per pixel
 (implicitly, values are assumed to lie in `[0,1)`) or one `uint8` per channel
 per pixel (values are assumed to lie in `[0,255]`).
-Tensorflow can convert between images in RGB or HSV. The conversion functions
+TensorFlow can convert between images in RGB or HSV. The conversion functions
 work only on float images, so you need to convert images in other formats using
 [`convert_image_dtype`](#convert-image-dtype).
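For context, a minimal sketch of the conversion flow that passage describes (the file name is a placeholder; 2016-era graph-mode names assumed):

```python
import tensorflow as tf

filename = "example.png"  # placeholder path
rgb_uint8 = tf.image.decode_png(tf.read_file(filename), channels=3)

# The conversion functions expect float images, so change the dtype first:
# uint8 values in [0, 255] are rescaled to floats in [0, 1).
rgb = tf.image.convert_image_dtype(rgb_uint8, tf.float32)

hsv = tf.image.rgb_to_hsv(rgb)        # RGB -> HSV
rgb_again = tf.image.hsv_to_rgb(hsv)  # and back
```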

@@ -9,7 +9,7 @@ Note: Functions taking `Tensor` arguments can also take anything accepted by
 ## Sparse Tensor Representation
-Tensorflow supports a `SparseTensor` representation for data that is sparse
+TensorFlow supports a `SparseTensor` representation for data that is sparse
 in multiple dimensions. Contrast this representation with `IndexedSlices`,
 which is efficient for representing tensors that are sparse in their first
 dimension, and dense along all other dimensions.
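As an illustration of the three-tensor representation described in the hunk above, a minimal sketch (2016-era constructor arguments assumed, where the third tensor is called `shape`):

```python
import tensorflow as tf

# The dense matrix [[1, 0, 0, 0],
#                   [0, 0, 2, 0],
#                   [0, 0, 0, 0]] expressed as a SparseTensor.
st = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])

# Convert back to a dense tensor to check the round trip.
dense = tf.sparse_tensor_to_dense(st)
```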
@@ -20,7 +20,7 @@ dimension, and dense along all other dimensions.
 Represents a sparse tensor.
-Tensorflow represents a sparse tensor as three separate dense tensors:
+TensorFlow represents a sparse tensor as three separate dense tensors:
 `indices`, `values`, and `shape`. In Python, the three tensors are
 collected into a `SparseTensor` class for ease of use. If you have separate
 `indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor`

@@ -1715,7 +1715,7 @@ This method currently blocks forever.
 A training helper that checkpoints models and computes summaries.
 The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
-and a `SessionManager` that takes care of common needs of Tensorflow
+and a `SessionManager` that takes care of common needs of TensorFlow
 training programs.
 #### Use for a single program
@@ -1725,7 +1725,7 @@ with tf.Graph().as_default():
 ...add operations to the graph...
 # Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
 sv = Supervisor(logdir='/tmp/mydir')
-# Get a Tensorflow session managed by the supervisor.
+# Get a TensorFlow session managed by the supervisor.
 with sv.managed_session(FLAGS.master) as sess:
 # Use the session to train the graph.
 while not sv.should_stop():

@@ -18,7 +18,7 @@ x_data = np.random.rand(100).astype(np.float32)
 y_data = x_data * 0.1 + 0.3
 # Try to find values for W and b that compute y_data = W * x_data + b
-# (We know that W should be 0.1 and b 0.3, but Tensorflow will
+# (We know that W should be 0.1 and b 0.3, but TensorFlow will
 # figure that out for us.)
 W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
 b = tf.Variable(tf.zeros([1]))
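For context, a self-contained sketch of how a fit like the one in this hunk is typically completed, with a loss, an optimizer, and a training loop (an illustrative continuation, not necessarily the tutorial's exact text; 2016-era graph-mode APIs assumed):

```python
import numpy as np
import tensorflow as tf

x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b

# Minimize the mean squared error between the predictions and y_data.
loss = tf.reduce_mean(tf.square(y - y_data))
train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())  # era-appropriate initializer
    for step in range(201):
        sess.run(train)
    print(sess.run([W, b]))  # should approach [0.1] and [0.3]
```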

@@ -927,7 +927,7 @@ There are several ways to preserve backwards-compatibility.
 5. Namespace any new Ops you create, by prefixing the Op names with something
 unique to your project. This avoids having your Op colliding with any Ops
-that might be included in future versions of Tensorflow.
+that might be included in future versions of TensorFlow.
 6. Plan ahead! Try to anticipate future uses for the Op. Some signature changes
 can't be done in a compatible way (for example, making a list of the same

@@ -318,7 +318,7 @@ or class docstring where the Ops constructors are called out.
 Here's an example from the module docsting in `image_ops.py`:
-Tensorflow can convert between images in RGB or HSV. The conversion
+TensorFlow can convert between images in RGB or HSV. The conversion
 functions work only on `float` images, so you need to convert images in
 other formats using [`convert_image_dtype`](#convert-image-dtype).

@@ -159,7 +159,7 @@ You should see a list of flower labels, in most cases with daisy on top
 `--image` parameter with your own images to try those out, and use the C++ code
 as a template to integrate with your own applications.
-If you'd like to use the retrained model in a Python program [this example from @eldor4do shows what you'll need to do](https://github.com/eldor4do/Tensorflow-Examples/blob/master/retraining-example.py).
+If you'd like to use the retrained model in a Python program [this example from @eldor4do shows what you'll need to do](https://github.com/eldor4do/TensorFlow-Examples/blob/master/retraining-example.py).
 ## Training on Your Own Categories

@@ -69,8 +69,8 @@ compose in your graph, but here are the details of how to add you own custom Op.
 ## How to write TensorFlow code
-Tensorflow Style Guide is set of style decisions that both developers
-and users of Tensorflow should follow to increase the readability of their code,
+TensorFlow Style Guide is set of style decisions that both developers
+and users of TensorFlow should follow to increase the readability of their code,
 reduce the number of errors, and promote consistency.
 [View Style Guide](style_guide.md)

@@ -1,6 +1,6 @@
 # TensorFlow Style Guide
-This page contains style decisions that both developers and users of Tensorflow
+This page contains style decisions that both developers and users of TensorFlow
 should follow to increase the readability of their code, reduce the number of
 errors, and promote consistency.

@@ -36,7 +36,7 @@ will use below.
 ### Start TensorFlow InteractiveSession
-Tensorflow relies on a highly efficient C++ backend to do its computation. The
+TensorFlow relies on a highly efficient C++ backend to do its computation. The
 connection to this backend is called a session. The common usage for TensorFlow
 programs is to first create a graph and then launch it in a session.
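A minimal sketch of that pattern (hypothetical constants; with an `InteractiveSession` installed as the default, `Tensor.eval()` needs no explicit session argument):

```python
import tensorflow as tf

sess = tf.InteractiveSession()  # becomes the default session for eval()/run()

a = tf.constant(3.0)
b = tf.constant(4.0)
c = a * b

print(c.eval())  # evaluates against the interactive (default) session

sess.close()
```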

@@ -43,7 +43,7 @@ Here is a short overview of what is in this directory.
 File | What's in it?
 --- | ---
-`word2vec.py` | A version of word2vec implemented using Tensorflow ops and minibatching.
+`word2vec.py` | A version of word2vec implemented using TensorFlow ops and minibatching.
 `word2vec_test.py` | Integration test for word2vec.
 `word2vec_optimized.py` | A version of word2vec implemented using C ops that does no minibatching.
 `word2vec_optimized_test.py` | Integration test for word2vec_optimized.

@@ -895,7 +895,7 @@ IndexedSlicesValue = collections.namedtuple(
 class SparseTensor(object):
 """Represents a sparse tensor.
-Tensorflow represents a sparse tensor as three separate dense tensors:
+TensorFlow represents a sparse tensor as three separate dense tensors:
 `indices`, `values`, and `shape`. In Python, the three tensors are
 collected into a `SparseTensor` class for ease of use. If you have separate
 `indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor`

@@ -83,7 +83,7 @@ class ExtractGlimpseTest(tf.test.TestCase):
 glimpse_cols = (tf.transpose(
 tf.image.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
-# Evaluate the Tensorflow Graph.
+# Evaluate the TensorFlow Graph.
 with self.test_session() as sess:
 value_rows, value_cols = sess.run([glimpse_rows, glimpse_cols])

@@ -106,7 +106,7 @@ Internally, images are either stored in as one `float32` per channel per pixel
 (implicitly, values are assumed to lie in `[0,1)`) or one `uint8` per channel
 per pixel (values are assumed to lie in `[0,255]`).
-Tensorflow can convert between images in RGB or HSV. The conversion functions
+TensorFlow can convert between images in RGB or HSV. The conversion functions
 work only on float images, so you need to convert images in other formats using
 [`convert_image_dtype`](#convert-image-dtype).

@@ -16,7 +16,7 @@
 # pylint: disable=g-short-docstring-punctuation
 """## Sparse Tensor Representation
-Tensorflow supports a `SparseTensor` representation for data that is sparse
+TensorFlow supports a `SparseTensor` representation for data that is sparse
 in multiple dimensions. Contrast this representation with `IndexedSlices`,
 which is efficient for representing tensors that are sparse in their first
 dimension, and dense along all other dimensions.

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-// Helper macros and typemaps for use in Tensorflow swig files.
+// Helper macros and typemaps for use in TensorFlow swig files.
 //
 %{
 #include <memory>

@@ -572,7 +572,7 @@ class EventAccumulator(object):
 If by_tags is True, purge all events that occurred after the given
 event.step, but only for the tags that the event has. Non-sequential
-event.steps suggest that a Tensorflow restart occurred, and we discard
+event.steps suggest that a TensorFlow restart occurred, and we discard
 the out-of-order events to display a consistent view in TensorBoard.
 Discarding by tags is the safer method, when we are unsure whether a restart

@@ -41,7 +41,7 @@ class Supervisor(object):
 """A training helper that checkpoints models and computes summaries.
 The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
-and a `SessionManager` that takes care of common needs of Tensorflow
+and a `SessionManager` that takes care of common needs of TensorFlow
 training programs.
 #### Use for a single program
@@ -51,7 +51,7 @@ class Supervisor(object):
 ...add operations to the graph...
 # Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
 sv = Supervisor(logdir='/tmp/mydir')
-# Get a Tensorflow session managed by the supervisor.
+# Get a TensorFlow session managed by the supervisor.
 with sv.managed_session(FLAGS.master) as sess:
 # Use the session to train the graph.
 while not sv.should_stop():

@@ -327,7 +327,7 @@ proto. For example:
 ## Notes
 All returned values, histograms, audio, and images are returned in the order
-they were written by Tensorflow (which should correspond to increasing
+they were written by TensorFlow (which should correspond to increasing
 `wall_time` order, but may not necessarily correspond to increasing step count
 if the process had to restart from a previous checkpoint).

@@ -65,7 +65,7 @@ tf_cc_test(
 #
 # NOTE: currently '-pthread' must be removed from the LINK_OPTS variable
 # in @protobuf//:BUILD to sucessfully build for Android. This is temporary
-# pending an update of the version of the protobuf library that Tensorflow
+# pending an update of the version of the protobuf library that TensorFlow
 # uses.
 cc_binary(
 name = "benchmark_model",

@@ -1,4 +1,4 @@
-# Tensorflow Model Benchmark Tool
+# TensorFlow Model Benchmark Tool
 ## Description

@@ -48,7 +48,7 @@ namespace benchmark_model {
 Status InitializeSession(int num_threads, const string& graph,
 std::unique_ptr<Session>* session,
 std::unique_ptr<StatSummarizer>* stats) {
-LOG(INFO) << "Loading Tensorflow.";
+LOG(INFO) << "Loading TensorFlow.";
 tensorflow::SessionOptions options;
 tensorflow::ConfigProto& config = options.config;
@@ -61,7 +61,7 @@ Status InitializeSession(int num_threads, const string& graph,
 tensorflow::GraphDef tensorflow_graph;
 Status s = ReadBinaryProto(Env::Default(), graph, &tensorflow_graph);
 if (!s.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Graph: " << s;
+LOG(ERROR) << "Could not create TensorFlow Graph: " << s;
 return s;
 }
@@ -69,7 +69,7 @@ Status InitializeSession(int num_threads, const string& graph,
 s = (*session)->Create(tensorflow_graph);
 if (!s.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Session: " << s;
+LOG(ERROR) << "Could not create TensorFlow Session: " << s;
 return s;
 }

@@ -1,4 +1,4 @@
-# Tensorflow Builds
+# TensorFlow Builds
 This directory contains all the files and setup instructions to run all
 the important builds and tests. **You can trivially run it yourself!** It also
@@ -75,7 +75,7 @@ for incoming gerrit changes. Gpu tests and benchmark are coming soon. Check
-## How Does Tensorflow Continuous Integration Work
+## How Does TensorFlow Continuous Integration Work
 We use [jenkins](https://jenkins-ci.org/) as our continuous integration.
 It is running at [ci.tensorflow.org](http://ci.tensorflow.org).

@@ -45,7 +45,7 @@
 # If any of the following environment variable has non-empty values, it will
 # be mapped into the docker container to override the default values (see
 # dist_test.sh)
-# TF_DIST_GRPC_SERVER_URL: URL to an existing Tensorflow GRPC server.
+# TF_DIST_GRPC_SERVER_URL: URL to an existing TensorFlow GRPC server.
 # If set to any non-empty and valid value (e.g.,
 # grpc://1.2.3.4:2222), it will cause the test
 # to bypass the k8s cluster setup and

@@ -14,7 +14,7 @@
 # limitations under the License.
 # ==============================================================================
-"""Generates YAML configuration files for distributed Tensorflow workers.
+"""Generates YAML configuration files for distributed TensorFlow workers.
 The workers will be run in a Kubernetes (k8s) container cluster.
 """

@@ -37,7 +37,7 @@ class CrashOnErrorCollector
 }
 };
-static const char kTensorflowHeaderPrefix[] = "";
+static const char kTensorFlowHeaderPrefix[] = "";
 static const char kPlaceholderFile[] =
 "tensorflow/tools/proto_text/placeholder.txt";
@@ -77,7 +77,7 @@ int MainImpl(int argc, char** argv) {
 }
 const string output_root = argv[1];
-const string output_relative_path = kTensorflowHeaderPrefix + string(argv[2]);
+const string output_relative_path = kTensorFlowHeaderPrefix + string(argv[2]);
 string src_relative_path;
 bool has_placeholder = false;
@@ -114,7 +114,7 @@ int MainImpl(int argc, char** argv) {
 proto_path_no_suffix.substr(output_relative_path.size());
 const auto code =
-tensorflow::GetProtoTextFunctionCode(*fd, kTensorflowHeaderPrefix);
+tensorflow::GetProtoTextFunctionCode(*fd, kTensorFlowHeaderPrefix);
 // Three passes, one for each output file.
 for (int pass = 0; pass < 3; ++pass) {