s/Tensorflow/TensorFlow. A losing battle :)
Change: 127324936
parent 4f6e9efb40
commit c87a7ca311
@@ -49,7 +49,7 @@ Change to your working directory:

Where *C:\Path\to* is the path to your real working directory.

-Create a folder where Tensorflow headers/libraries/binaries will be installed
+Create a folder where TensorFlow headers/libraries/binaries will be installed
after they are built:

C:\Path\to>mkdir install
@@ -83,7 +83,7 @@ Go to the project folder:
C:\Path\to>cd tensorflow
C:\Path\to\tensorflow>

-Now go to *tensorflow\contrib\cmake* folder in Tensorflow's contrib sources:
+Now go to *tensorflow\contrib\cmake* folder in TensorFlow's contrib sources:

C:\Path\to\tensorflow>cd tensorflow\contrib\cmake
C:\Path\to\tensorflow\tensorflow\contrib\cmake>
@@ -101,7 +101,7 @@ and
[Visual Studio](http://www.cmake.org/cmake/help/latest/manual/cmake-generators.7.html#visual-studio-generators)
generators.

-We will use shadow building to separate the temporary files from the Tensorflow
+We will use shadow building to separate the temporary files from the TensorFlow
source code.

Create a temporary *build* folder and change your working directory to it:
@@ -143,7 +143,7 @@ It will generate *Visual Studio* solution file *tensorflow.sln* in current
directory.

If the *gmock* directory does not exist, and/or you do not want to build
-Tensorflow unit tests, you need to add *cmake* command argument
+TensorFlow unit tests, you need to add *cmake* command argument
`-Dtensorflow_BUILD_TESTS=OFF` to disable testing.

Compiling
@@ -219,7 +219,7 @@ If all tests are passed, safely continue.
Installing
==========

-To install Tensorflow to the specified *install* folder:
+To install TensorFlow to the specified *install* folder:

[...]\contrib\cmake\build\release>nmake install

@@ -232,7 +232,7 @@ It sounds not so strange and it works.

This will create the following folders under the *install* location:
* bin - that contains tensorflow binaries;
-* include - that contains C++ headers and Tensorflow *.proto files;
+* include - that contains C++ headers and TensorFlow *.proto files;
* lib - that contains linking libraries and *CMake* configuration files for
*tensorflow* package.

@@ -251,7 +251,7 @@ should link against release libtensorflow.lib library.
DLLs vs. static linking
=======================

-Static linking is now the default for the Tensorflow Buffer libraries. Due to
+Static linking is now the default for the TensorFlow Buffer libraries. Due to
issues with Win32's use of a separate heap for each DLL, as well as binary
compatibility issues between different versions of MSVC's STL library, it is
recommended that you use static linkage only. However, it is possible to
@@ -270,7 +270,7 @@ compatibility between releases, so it is likely that future versions of these
libraries will *not* be usable as drop-in replacements.

If your project is itself a DLL intended for use by third-party software, we
-recommend that you do NOT expose Tensorflow objects in your library's
+recommend that you do NOT expose TensorFlow objects in your library's
public interface, and that you statically link them into your library.

Notes on Compiler Warnings
@@ -123,7 +123,7 @@ tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
tensorflow::Session* session_pointer = nullptr;
tensorflow::Status session_status = tensorflow::NewSession(options, &session_pointer);
if (!session_status.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Session: " << session_status;
+LOG(ERROR) << "Could not create TensorFlow Session: " << session_status;
return session_status;
}
session->reset(session_pointer);
@@ -149,7 +149,7 @@ tensorflow::Status LoadModel(NSString* file_name, NSString* file_type,
LOG(INFO) << "Creating session.";
tensorflow::Status create_status = (*session)->Create(tensorflow_graph);
if (!create_status.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Graph: " << create_status;
+LOG(ERROR) << "Could not create TensorFlow Graph: " << create_status;
return create_status;
}

@@ -157,7 +157,7 @@ NSString* RunInferenceOnImage() {
LOG(INFO) << "Creating session.";
tensorflow::Status s = session->Create(tensorflow_graph);
if (!s.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Graph: " << s;
+LOG(ERROR) << "Could not create TensorFlow Graph: " << s;
return @"";
}

@@ -224,7 +224,7 @@ def get_autoencoder_model(hidden_units, target_predictor_fn,
return dnn_autoencoder_estimator


-## This will be in Tensorflow 0.7.
+## This will be in TensorFlow 0.7.
## TODO(ilblackdragon): Clean this up when it's released


@@ -328,7 +328,7 @@ def bidirectional_rnn(cell_fw,

return outputs, array_ops_.concat(1, [state_fw, state_bw])

-# End of Tensorflow 0.7
+# End of TensorFlow 0.7


def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,
@@ -1,4 +1,4 @@
-# Description: Tensorflow Serving session bundle.
+# Description: TensorFlow Serving session bundle.

package(
default_visibility = ["//visibility:public"],
@@ -40,7 +40,7 @@ These exports have the following properties,
## Python exporting code

The [`Exporter`](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/session_bundle/exporter.py)
-class can be used to export a model in the above format from a Tensorflow python
+class can be used to export a model in the above format from a TensorFlow python
binary.

## C++ initialization code
@@ -1,4 +1,4 @@
-# Description: Tensorflow Serving session_bundle example.
+# Description: TensorFlow Serving session_bundle example.

package(
default_visibility = ["//tensorflow/contrib/session_bundle:__subpackages__"],
@@ -13,12 +13,12 @@ exports_files(["LICENSE"])
filegroup(
name = "all_files",
srcs = glob(
-["**/*"],
-exclude = [
-"**/METADATA",
-"**/OWNERS",
-"g3doc/sitemap.md",
-],
+["**/*"],
+exclude = [
+"**/METADATA",
+"**/OWNERS",
+"g3doc/sitemap.md",
+],
),
visibility = ["//visibility:public"],
)
@@ -26,27 +26,27 @@ filegroup(
py_binary(
name = "export_half_plus_two",
srcs = [
-"export_half_plus_two.py",
+"export_half_plus_two.py",
],
srcs_version = "PY2AND3",
deps = [
-"//tensorflow:tensorflow_py",
-"//tensorflow/contrib/session_bundle:exporter",
+"//tensorflow:tensorflow_py",
+"//tensorflow/contrib/session_bundle:exporter",
],
)

genrule(
name = "half_plus_two",
outs = [
-"half_plus_two/00000123/export.meta",
-"half_plus_two/00000123/export-00000-of-00001",
+"half_plus_two/00000123/export.meta",
+"half_plus_two/00000123/export-00000-of-00001",
],
cmd =
-"rm -rf /tmp/half_plus_two; " +
-"$(PYTHON_BIN_PATH) $(locations :export_half_plus_two); " +
-"cp -r /tmp/half_plus_two/* $(@D)/half_plus_two",
+"rm -rf /tmp/half_plus_two; " +
+"$(PYTHON_BIN_PATH) $(locations :export_half_plus_two); " +
+"cp -r /tmp/half_plus_two/* $(@D)/half_plus_two",
tools = [
-":export_half_plus_two",
+":export_half_plus_two",
],
visibility = ["//visibility:public"],
)
@@ -124,7 +124,7 @@ void BasicTest(const string& export_path) {
outputs[0], test::AsTensor<float>({2, 2.5, 3, 3.5}, TensorShape({4, 1})));
}

-TEST(LoadSessionBundleFromPath, BasicTensorflowContrib) {
+TEST(LoadSessionBundleFromPath, BasicTensorFlowContrib) {
const string export_path = test_util::TestSrcDirPath(
"session_bundle/example/half_plus_two/00000123");
BasicTest(export_path);
@@ -271,7 +271,7 @@ class Image(ItemHandler):


class TFExampleDecoder(data_decoder.DataDecoder):
-"""A decoder for Tensorflow Examples.
+"""A decoder for TensorFlow Examples.

Decoding Example proto buffers is comprised of two stages: (1) Example parsing
and (2) tensor manipulation.
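To illustrate the two stages named in the docstring above, here is a hedged sketch using the core `tf.parse_single_example` API rather than the decoder's own interface; the feature names are invented for illustration and are not taken from the diff.

```python
import tensorflow as tf

# Stage 1: Example parsing -- map a serialized Example proto to raw tensors.
serialized = tf.placeholder(tf.string, shape=[])
parsed = tf.parse_single_example(
    serialized,
    features={
        # Illustrative feature names only.
        'image/encoded': tf.FixedLenFeature([], tf.string, default_value=''),
        'image/class/label': tf.FixedLenFeature([1], tf.int64),
    })

# Stage 2: tensor manipulation -- turn the parsed values into model-ready tensors.
image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)
label = tf.squeeze(parsed['image/class/label'])
```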
@@ -181,7 +181,7 @@ def evaluation(sess,
written out using a summary writer.

Args:
-sess: The current Tensorflow `Session`.
+sess: The current TensorFlow `Session`.
num_evals: The number of times to execute `eval_op`.
init_op: An operation run at the beginning of evaluation.
init_op_feed_dict: A feed dictionary to use when executing `init_op`.
@@ -1,4 +1,4 @@
-# Tensorflow code for training random forests.
+# TensorFlow code for training random forests.
licenses(["notice"]) # Apache 2.0

exports_files(["LICENSE"])
@@ -627,7 +627,7 @@ cc_library(
alwayslink = 1,
)

-# Full Tensorflow library with operator support. Use this unless reducing
+# Full TensorFlow library with operator support. Use this unless reducing
# binary size (by packaging a reduced operator set) is a concern.
cc_library(
name = "android_tensorflow_lib",
@@ -706,7 +706,7 @@ filegroup(
visibility = ["//visibility:public"],
)

-# Portable library providing testing functionality for Tensorflow.
+# Portable library providing testing functionality for TensorFlow.
cc_library(
name = "android_tensorflow_test_lib",
testonly = 1,
@@ -15,7 +15,7 @@ limitations under the License.

// A set of lightweight wrappers which simplify access to Example features.
//
-// Tensorflow Example proto uses associative maps on top of oneof fields.
+// TensorFlow Example proto uses associative maps on top of oneof fields.
// So accessing feature values is not very convenient.
//
// For example, to read a first value of integer feature "tag":
@@ -1,5 +1,5 @@
# Description:
-# Tensorflow camera demo app for Android.
+# TensorFlow camera demo app for Android.

package(default_visibility = ["//visibility:public"])

@@ -1,6 +1,6 @@
-# Tensorflow Android Camera Demo
+# TensorFlow Android Camera Demo

-This folder contains a simple camera-based demo application utilizing Tensorflow.
+This folder contains a simple camera-based demo application utilizing TensorFlow.

## Description

@@ -76,5 +76,5 @@ errors may not be obvious if the app halts immediately, so if you installed
with bazel and the app doesn't come up, then the easiest thing to do is try
installing with adb.

-Once the app is installed it will be named "Tensorflow Demo" and have the orange
-Tensorflow logo as its icon.
+Once the app is installed it will be named "TensorFlow Demo" and have the orange
+TensorFlow logo as its icon.
@@ -41,7 +41,7 @@ limitations under the License.

using namespace tensorflow;

-// Global variables that holds the Tensorflow classifier.
+// Global variables that holds the TensorFlow classifier.
static std::unique_ptr<tensorflow::Session> session;

static std::vector<std::string> g_label_strings;
@@ -85,7 +85,7 @@ inline static int64 CurrentThreadTimeUs() {
return tv.tv_sec * 1000000 + tv.tv_usec;
}

-JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)(
+JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorFlow)(
JNIEnv* env, jobject thiz, jobject java_asset_manager, jstring model,
jstring labels, jint num_classes, jint model_input_size, jint image_mean,
jfloat image_std, jstring input_name, jstring output_name) {
@@ -112,7 +112,7 @@ JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)(
g_output_name.reset(
new std::string(env->GetStringUTFChars(output_name, NULL)));

-LOG(INFO) << "Loading Tensorflow.";
+LOG(INFO) << "Loading TensorFlow.";

LOG(INFO) << "Making new SessionOptions.";
tensorflow::SessionOptions options;
@@ -137,12 +137,12 @@ JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)(
LOG(INFO) << "Creating session.";
tensorflow::Status s = session->Create(tensorflow_graph);
if (!s.ok()) {
-LOG(FATAL) << "Could not create Tensorflow Graph: " << s;
+LOG(FATAL) << "Could not create TensorFlow Graph: " << s;
}

// Clear the proto to save memory space.
tensorflow_graph.Clear();
-LOG(INFO) << "Tensorflow graph loaded from: " << model_cstr;
+LOG(INFO) << "TensorFlow graph loaded from: " << model_cstr;

// Read the label list
ReadFileToVector(asset_manager, labels_cstr, &g_label_strings);
@@ -237,7 +237,7 @@ static std::string ClassifyImage(const RGBA* const bitmap_src) {

auto input_tensor_mapped = input_tensor.tensor<float, 4>();

-LOG(INFO) << "Tensorflow: Copying Data.";
+LOG(INFO) << "TensorFlow: Copying Data.";
for (int i = 0; i < g_tensorflow_input_size; ++i) {
const RGBA* src = bitmap_src + i * g_tensorflow_input_size;
for (int j = 0; j < g_tensorflow_input_size; ++j) {
@@ -14,8 +14,8 @@ limitations under the License.
==============================================================================*/

// The methods are exposed to Java to allow for interaction with the native
-// Tensorflow code. See
-// tensorflow/examples/android/src/org/tensorflow/TensorflowClassifier.java
+// TensorFlow code. See
+// tensorflow/examples/android/src/org/tensorflow/TensorFlowClassifier.java
// for the Java counterparts.

#ifndef ORG_TENSORFLOW_JNI_TENSORFLOW_JNI_H_ // NOLINT
@@ -28,9 +28,9 @@ extern "C" {
#endif // __cplusplus

#define TENSORFLOW_METHOD(METHOD_NAME) \
-Java_org_tensorflow_demo_TensorflowClassifier_##METHOD_NAME // NOLINT
+Java_org_tensorflow_demo_TensorFlowClassifier_##METHOD_NAME // NOLINT

-JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorflow)(
+JNIEXPORT jint JNICALL TENSORFLOW_METHOD(initializeTensorFlow)(
JNIEnv* env, jobject thiz, jobject java_asset_manager, jstring model,
jstring labels, jint num_classes, jint model_input_size, jint image_mean,
jfloat image_std, jstring input_name, jstring output_name);
@@ -16,5 +16,5 @@
-->

<resources>
-<string name="app_name">Tensorflow Demo</string>
+<string name="app_name">TensorFlow Demo</string>
</resources>
@@ -1,5 +1,5 @@
# Description:
-# Tensorflow C++ inference example for labeling images.
+# TensorFlow C++ inference example for labeling images.

package(default_visibility = ["//tensorflow:internal"])

@@ -1,4 +1,4 @@
-# Tensorflow C++ Image Recognition Demo
+# TensorFlow C++ Image Recognition Demo

This example shows how you can load a pre-trained TensorFlow network and use it
to recognize objects in images.
@@ -1,6 +1,6 @@
Represents a sparse tensor.

-Tensorflow represents a sparse tensor as three separate dense tensors:
+TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor`
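As an aside to the sparse-tensor passage above, a minimal sketch of wrapping separate `indices`, `values`, and `shape` tensors in a `SparseTensor`, assuming the 0.x-era Python API in which the constructor argument is still named `shape`; the example values are arbitrary.

```python
import tensorflow as tf

# The three dense pieces of a 3x4 sparse matrix with two non-zero entries.
indices = [[0, 0], [1, 2]]   # coordinates of the non-zero values
values = [1.0, 2.0]          # the non-zero values themselves
shape = [3, 4]               # dense shape of the full tensor

# Wrap them in a SparseTensor, as the passage above suggests.
st = tf.SparseTensor(indices=indices, values=values, shape=shape)

with tf.Session() as sess:
    # Densify just to show the round trip.
    print(sess.run(tf.sparse_tensor_to_dense(st)))
```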
@@ -1,7 +1,7 @@
A training helper that checkpoints models and computes summaries.

The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
-and a `SessionManager` that takes care of common needs of Tensorflow
+and a `SessionManager` that takes care of common needs of TensorFlow
training programs.

#### Use for a single program
@@ -11,7 +11,7 @@ with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
-# Get a Tensorflow session managed by the supervisor.
+# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
```
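To make the docstring fragment above concrete, a hedged sketch of the same pattern, assuming `tf.train.Supervisor`; the counter variable, the step limit, and the empty master string (standing in for `FLAGS.master`) are invented for illustration.

```python
import tensorflow as tf

with tf.Graph().as_default():
    # A trivial stand-in for "...add operations to the graph...".
    counter = tf.Variable(0, name='counter')
    train_op = counter.assign_add(1)

    # Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
    sv = tf.train.Supervisor(logdir='/tmp/mydir')
    # Get a TensorFlow session managed by the supervisor.
    with sv.managed_session('') as sess:
        # Use the session to train the graph.
        while not sv.should_stop():
            step = sess.run(train_op)
            if step >= 1000:           # stop condition for this toy example
                sv.request_stop()
```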
@@ -784,7 +784,7 @@ Internally, images are either stored in as one `float32` per channel per pixel
(implicitly, values are assumed to lie in `[0,1)`) or one `uint8` per channel
per pixel (values are assumed to lie in `[0,255]`).

-Tensorflow can convert between images in RGB or HSV. The conversion functions
+TensorFlow can convert between images in RGB or HSV. The conversion functions
work only on float images, so you need to convert images in other formats using
[`convert_image_dtype`](#convert-image-dtype).

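A small illustrative sketch of the conversion flow described above (convert to float first, then switch color spaces), assuming the `tf.image` API of this era; the image shape is arbitrary.

```python
import tensorflow as tf

# Start from a uint8 RGB image with values in [0, 255].
rgb_uint8 = tf.placeholder(tf.uint8, shape=[64, 64, 3])

# The conversion functions work only on float images, so convert first.
rgb_float = tf.image.convert_image_dtype(rgb_uint8, dtype=tf.float32)

# Now RGB <-> HSV round-trips on the float image.
hsv = tf.image.rgb_to_hsv(rgb_float)
rgb_back = tf.image.hsv_to_rgb(hsv)
```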
@@ -9,7 +9,7 @@ Note: Functions taking `Tensor` arguments can also take anything accepted by

## Sparse Tensor Representation

-Tensorflow supports a `SparseTensor` representation for data that is sparse
+TensorFlow supports a `SparseTensor` representation for data that is sparse
in multiple dimensions. Contrast this representation with `IndexedSlices`,
which is efficient for representing tensors that are sparse in their first
dimension, and dense along all other dimensions.
@@ -20,7 +20,7 @@ dimension, and dense along all other dimensions.

Represents a sparse tensor.

-Tensorflow represents a sparse tensor as three separate dense tensors:
+TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor`
@@ -1715,7 +1715,7 @@ This method currently blocks forever.
A training helper that checkpoints models and computes summaries.

The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
-and a `SessionManager` that takes care of common needs of Tensorflow
+and a `SessionManager` that takes care of common needs of TensorFlow
training programs.

#### Use for a single program
@@ -1725,7 +1725,7 @@ with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
-# Get a Tensorflow session managed by the supervisor.
+# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
@@ -18,7 +18,7 @@ x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Try to find values for W and b that compute y_data = W * x_data + b
-# (We know that W should be 0.1 and b 0.3, but Tensorflow will
+# (We know that W should be 0.1 and b 0.3, but TensorFlow will
# figure that out for us.)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
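The hunk above cuts off the classic getting-started example; a sketch of how it usually continues (loss, gradient descent, and a training loop), assuming the 0.x-era API where the variable initializer is still `tf.initialize_all_variables`.

```python
import numpy as np
import tensorflow as tf

x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b

# Minimize the mean squared error between y and y_data.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

init = tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            print(step, sess.run(W), sess.run(b))  # W -> ~0.1, b -> ~0.3
```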
@@ -927,7 +927,7 @@ There are several ways to preserve backwards-compatibility.

5. Namespace any new Ops you create, by prefixing the Op names with something
unique to your project. This avoids having your Op colliding with any Ops
-that might be included in future versions of Tensorflow.
+that might be included in future versions of TensorFlow.

6. Plan ahead! Try to anticipate future uses for the Op. Some signature changes
can't be done in a compatible way (for example, making a list of the same
@@ -318,7 +318,7 @@ or class docstring where the Ops constructors are called out.

Here's an example from the module docsting in `image_ops.py`:

-Tensorflow can convert between images in RGB or HSV. The conversion
+TensorFlow can convert between images in RGB or HSV. The conversion
functions work only on `float` images, so you need to convert images in
other formats using [`convert_image_dtype`](#convert-image-dtype).

@@ -159,7 +159,7 @@ You should see a list of flower labels, in most cases with daisy on top
`--image` parameter with your own images to try those out, and use the C++ code
as a template to integrate with your own applications.

-If you'd like to use the retrained model in a Python program [this example from @eldor4do shows what you'll need to do](https://github.com/eldor4do/Tensorflow-Examples/blob/master/retraining-example.py).
+If you'd like to use the retrained model in a Python program [this example from @eldor4do shows what you'll need to do](https://github.com/eldor4do/TensorFlow-Examples/blob/master/retraining-example.py).

## Training on Your Own Categories

@@ -69,8 +69,8 @@ compose in your graph, but here are the details of how to add you own custom Op.

## How to write TensorFlow code

-Tensorflow Style Guide is set of style decisions that both developers
-and users of Tensorflow should follow to increase the readability of their code,
+TensorFlow Style Guide is set of style decisions that both developers
+and users of TensorFlow should follow to increase the readability of their code,
reduce the number of errors, and promote consistency.

[View Style Guide](style_guide.md)
@@ -1,6 +1,6 @@
# TensorFlow Style Guide

-This page contains style decisions that both developers and users of Tensorflow
+This page contains style decisions that both developers and users of TensorFlow
should follow to increase the readability of their code, reduce the number of
errors, and promote consistency.

@@ -36,7 +36,7 @@ will use below.

### Start TensorFlow InteractiveSession

-Tensorflow relies on a highly efficient C++ backend to do its computation. The
+TensorFlow relies on a highly efficient C++ backend to do its computation. The
connection to this backend is called a session. The common usage for TensorFlow
programs is to first create a graph and then launch it in a session.

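A minimal sketch of what starting an `InteractiveSession` looks like in practice, assuming the Python API of this era; the constants are arbitrary.

```python
import tensorflow as tf

# InteractiveSession installs itself as the default session, so Tensor.eval()
# and Operation.run() can be called without passing a session explicitly.
sess = tf.InteractiveSession()

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
print(tf.add(a, b).eval())   # -> [ 4.  6.]

sess.close()
```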
@@ -43,7 +43,7 @@ Here is a short overview of what is in this directory.

File | What's in it?
--- | ---
-`word2vec.py` | A version of word2vec implemented using Tensorflow ops and minibatching.
+`word2vec.py` | A version of word2vec implemented using TensorFlow ops and minibatching.
`word2vec_test.py` | Integration test for word2vec.
`word2vec_optimized.py` | A version of word2vec implemented using C ops that does no minibatching.
`word2vec_optimized_test.py` | Integration test for word2vec_optimized.
@@ -895,7 +895,7 @@ IndexedSlicesValue = collections.namedtuple(
class SparseTensor(object):
"""Represents a sparse tensor.

-Tensorflow represents a sparse tensor as three separate dense tensors:
+TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor`
@@ -83,7 +83,7 @@ class ExtractGlimpseTest(tf.test.TestCase):
glimpse_cols = (tf.transpose(
tf.image.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))

-# Evaluate the Tensorflow Graph.
+# Evaluate the TensorFlow Graph.
with self.test_session() as sess:
value_rows, value_cols = sess.run([glimpse_rows, glimpse_cols])

@@ -106,7 +106,7 @@ Internally, images are either stored in as one `float32` per channel per pixel
(implicitly, values are assumed to lie in `[0,1)`) or one `uint8` per channel
per pixel (values are assumed to lie in `[0,255]`).

-Tensorflow can convert between images in RGB or HSV. The conversion functions
+TensorFlow can convert between images in RGB or HSV. The conversion functions
work only on float images, so you need to convert images in other formats using
[`convert_image_dtype`](#convert-image-dtype).

@@ -16,7 +16,7 @@
# pylint: disable=g-short-docstring-punctuation
"""## Sparse Tensor Representation

-Tensorflow supports a `SparseTensor` representation for data that is sparse
+TensorFlow supports a `SparseTensor` representation for data that is sparse
in multiple dimensions. Contrast this representation with `IndexedSlices`,
which is efficient for representing tensors that are sparse in their first
dimension, and dense along all other dimensions.
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

-// Helper macros and typemaps for use in Tensorflow swig files.
+// Helper macros and typemaps for use in TensorFlow swig files.
//
%{
#include <memory>
@@ -572,7 +572,7 @@ class EventAccumulator(object):

If by_tags is True, purge all events that occurred after the given
event.step, but only for the tags that the event has. Non-sequential
-event.steps suggest that a Tensorflow restart occurred, and we discard
+event.steps suggest that a TensorFlow restart occurred, and we discard
the out-of-order events to display a consistent view in TensorBoard.

Discarding by tags is the safer method, when we are unsure whether a restart
@@ -41,7 +41,7 @@ class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.

The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
-and a `SessionManager` that takes care of common needs of Tensorflow
+and a `SessionManager` that takes care of common needs of TensorFlow
training programs.

#### Use for a single program
@@ -51,7 +51,7 @@ class Supervisor(object):
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
-# Get a Tensorflow session managed by the supervisor.
+# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
@@ -327,7 +327,7 @@ proto. For example:
## Notes

All returned values, histograms, audio, and images are returned in the order
-they were written by Tensorflow (which should correspond to increasing
+they were written by TensorFlow (which should correspond to increasing
`wall_time` order, but may not necessarily correspond to increasing step count
if the process had to restart from a previous checkpoint).

@@ -65,7 +65,7 @@ tf_cc_test(
#
# NOTE: currently '-pthread' must be removed from the LINK_OPTS variable
# in @protobuf//:BUILD to sucessfully build for Android. This is temporary
-# pending an update of the version of the protobuf library that Tensorflow
+# pending an update of the version of the protobuf library that TensorFlow
# uses.
cc_binary(
name = "benchmark_model",
@@ -1,4 +1,4 @@
-# Tensorflow Model Benchmark Tool
+# TensorFlow Model Benchmark Tool

## Description

@@ -48,7 +48,7 @@ namespace benchmark_model {
Status InitializeSession(int num_threads, const string& graph,
std::unique_ptr<Session>* session,
std::unique_ptr<StatSummarizer>* stats) {
-LOG(INFO) << "Loading Tensorflow.";
+LOG(INFO) << "Loading TensorFlow.";

tensorflow::SessionOptions options;
tensorflow::ConfigProto& config = options.config;
@@ -61,7 +61,7 @@ Status InitializeSession(int num_threads, const string& graph,
tensorflow::GraphDef tensorflow_graph;
Status s = ReadBinaryProto(Env::Default(), graph, &tensorflow_graph);
if (!s.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Graph: " << s;
+LOG(ERROR) << "Could not create TensorFlow Graph: " << s;
return s;
}

@@ -69,7 +69,7 @@ Status InitializeSession(int num_threads, const string& graph,

s = (*session)->Create(tensorflow_graph);
if (!s.ok()) {
-LOG(ERROR) << "Could not create Tensorflow Session: " << s;
+LOG(ERROR) << "Could not create TensorFlow Session: " << s;
return s;
}

@@ -1,4 +1,4 @@
-# Tensorflow Builds
+# TensorFlow Builds

This directory contains all the files and setup instructions to run all
the important builds and tests. **You can trivially run it yourself!** It also
@@ -75,7 +75,7 @@ for incoming gerrit changes. Gpu tests and benchmark are coming soon. Check



-## How Does Tensorflow Continuous Integration Work
+## How Does TensorFlow Continuous Integration Work

We use [jenkins](https://jenkins-ci.org/) as our continuous integration.
It is running at [ci.tensorflow.org](http://ci.tensorflow.org).
@@ -45,7 +45,7 @@
# If any of the following environment variable has non-empty values, it will
# be mapped into the docker container to override the default values (see
# dist_test.sh)
-# TF_DIST_GRPC_SERVER_URL: URL to an existing Tensorflow GRPC server.
+# TF_DIST_GRPC_SERVER_URL: URL to an existing TensorFlow GRPC server.
# If set to any non-empty and valid value (e.g.,
# grpc://1.2.3.4:2222), it will cause the test
# to bypass the k8s cluster setup and
@@ -14,7 +14,7 @@
# limitations under the License.
# ==============================================================================

-"""Generates YAML configuration files for distributed Tensorflow workers.
+"""Generates YAML configuration files for distributed TensorFlow workers.

The workers will be run in a Kubernetes (k8s) container cluster.
"""
@@ -37,7 +37,7 @@ class CrashOnErrorCollector
}
};

-static const char kTensorflowHeaderPrefix[] = "";
+static const char kTensorFlowHeaderPrefix[] = "";

static const char kPlaceholderFile[] =
"tensorflow/tools/proto_text/placeholder.txt";
@@ -77,7 +77,7 @@ int MainImpl(int argc, char** argv) {
}

const string output_root = argv[1];
-const string output_relative_path = kTensorflowHeaderPrefix + string(argv[2]);
+const string output_relative_path = kTensorFlowHeaderPrefix + string(argv[2]);

string src_relative_path;
bool has_placeholder = false;
@@ -114,7 +114,7 @@ int MainImpl(int argc, char** argv) {
proto_path_no_suffix.substr(output_relative_path.size());

const auto code =
-tensorflow::GetProtoTextFunctionCode(*fd, kTensorflowHeaderPrefix);
+tensorflow::GetProtoTextFunctionCode(*fd, kTensorFlowHeaderPrefix);

// Three passes, one for each output file.
for (int pass = 0; pass < 3; ++pass) {