diff --git a/configure.py b/configure.py
index ea732c64e2a..22b9abedd77 100644
--- a/configure.py
+++ b/configure.py
@@ -40,7 +40,7 @@ _DEFAULT_CUDA_PATH = '/usr/local/cuda'
 _DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
 _DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
                           'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
-_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/x86_64-linux-gnu'
+_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
 _TF_OPENCL_VERSION = '1.2'
 _DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
 _DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
diff --git a/tensorflow/contrib/data/python/ops/grouping.py b/tensorflow/contrib/data/python/ops/grouping.py
index a19be222545..ae10d2eb22d 100644
--- a/tensorflow/contrib/data/python/ops/grouping.py
+++ b/tensorflow/contrib/data/python/ops/grouping.py
@@ -42,7 +42,7 @@ def group_by_window(key_func,
   This transformation maps each consecutive element in a dataset to a key
   using `key_func` and groups the elements by key. It then applies
   `reduce_func` to at most `window_size_func(key)` elements matching the same
-  key. All execpt the final window for each key will contain
+  key. All except the final window for each key will contain
   `window_size_func(key)` elements; the final window may be smaller.

   You may provide either a constant `window_size` or a window size determined by
diff --git a/tensorflow/contrib/lite/toco/g3doc/python_api.md b/tensorflow/contrib/lite/toco/g3doc/python_api.md
index 440f9c367c2..36e2d9c3723 100644
--- a/tensorflow/contrib/lite/toco/g3doc/python_api.md
+++ b/tensorflow/contrib/lite/toco/g3doc/python_api.md
@@ -28,7 +28,7 @@ val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
 out = tf.identity(val, name="out")
 with tf.Session() as sess:
   tflite_model = tf.contrib.lite.toco_convert(sess.graph_def, [img], [out])
-  open("test.tflite", "wb").write(tflite_modeL)
+  open("test.tflite", "wb").write(tflite_model)
 ```

 **NOTE** Currently, the TOCO command will cause a fatal error to the Python
diff --git a/tensorflow/core/platform/default/logging.h b/tensorflow/core/platform/default/logging.h
index f0efa31d557..2c134f1be93 100644
--- a/tensorflow/core/platform/default/logging.h
+++ b/tensorflow/core/platform/default/logging.h
@@ -64,11 +64,11 @@ class LogMessageFatal : public LogMessage {
 };

 #define _TF_LOG_INFO \
-  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::INFO)
+  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, ::tensorflow::INFO)
 #define _TF_LOG_WARNING \
-  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::WARNING)
+  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, ::tensorflow::WARNING)
 #define _TF_LOG_ERROR \
-  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::ERROR)
+  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, ::tensorflow::ERROR)
 #define _TF_LOG_FATAL \
   ::tensorflow::internal::LogMessageFatal(__FILE__, __LINE__)
diff --git a/tensorflow/docs_src/extend/add_filesys.md b/tensorflow/docs_src/extend/add_filesys.md
index 06f11de4eb0..bc0f662f0cf 100644
--- a/tensorflow/docs_src/extend/add_filesys.md
+++ b/tensorflow/docs_src/extend/add_filesys.md
@@ -225,7 +225,7 @@ it will use the `FooBarFileSystem` implementation.
 Next, you must build a shared object containing this implementation. An
 example of doing so using bazel's `cc_binary` rule can be found
 [here](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/BUILD#L244),
-but you may use any build system to do so. See the section on @{$adding_an_op#build-the-op-library$building the op library} for similar
+but you may use any build system to do so. See the section on @{$adding_an_op#build_the_op_library$building the op library} for similar
 instructions. The result of building this target is a `.so` shared object file.
diff --git a/tensorflow/docs_src/extend/new_data_formats.md b/tensorflow/docs_src/extend/new_data_formats.md
index b3cc9680474..10e717c280f 100644
--- a/tensorflow/docs_src/extend/new_data_formats.md
+++ b/tensorflow/docs_src/extend/new_data_formats.md
@@ -167,7 +167,7 @@ REGISTER_KERNEL_BUILDER(Name("TextLineReader").Device(DEVICE_CPU),
 ```

 The last step is to add the Python wrapper. You can either do this by
-@{$adding_an_op#building_the_op_library$compiling a dynamic library}
+@{$adding_an_op#build_the_op_library$compiling a dynamic library}
 or, if you are building TensorFlow from source, adding to `user_ops.py`.
 For the latter, you will import `tensorflow.python.ops.io_ops` in
 [`tensorflow/python/user_ops/user_ops.py`](https://www.tensorflow.org/code/tensorflow/python/user_ops/user_ops.py)
diff --git a/tensorflow/docs_src/tutorials/layers.md b/tensorflow/docs_src/tutorials/layers.md
index 9b17d0d4d52..aeb746f29c2 100644
--- a/tensorflow/docs_src/tutorials/layers.md
+++ b/tensorflow/docs_src/tutorials/layers.md
@@ -198,9 +198,9 @@ Classifier"](#training_and_evaluating_the_cnn_mnist_classifier).
 ### Input Layer

 The methods in the `layers` module for creating convolutional and pooling layers
-for two-dimensional image data expect input tensors to have a shape of
-[batch_size, image_width, image_height,
-channels], defined as follows:
+for two-dimensional image data expect input tensors to have a `channels_last` shape of
+[batch_size, image_height, image_width, channels]
+or a `channels_first` shape of [batch_size, channels, image_height, image_width], defined as follows:

 * _`batch_size`_. Size of the subset of examples to use when performing
   gradient descent during training.
diff --git a/tensorflow/python/client/session.py b/tensorflow/python/client/session.py
index 6e9ce9b0806..da5dc6f5998 100644
--- a/tensorflow/python/client/session.py
+++ b/tensorflow/python/client/session.py
@@ -889,6 +889,8 @@ class BaseSession(SessionInterface):
       Either a single value if `fetches` is a single graph element, or
       a list of values if `fetches` is a list, or a dictionary with the
       same keys as `fetches` if that is a dictionary (described above).
+      Order in which `fetches` operations are evaluated inside the call
+      is undefined.

     Raises:
       RuntimeError: If this `Session` is in an invalid state (e.g. has been
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index 4699e05269d..276897ab99e 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -167,6 +167,7 @@ from tensorflow.python.ops import gen_math_ops
 from tensorflow.python.ops import gen_nn_ops
 from tensorflow.python.ops import gen_sparse_ops
 from tensorflow.python.ops import gen_spectral_ops
+from tensorflow.python.platform import tf_logging as logging
 # go/tf-wildcard-import
 # pylint: disable=wildcard-import
 from tensorflow.python.ops.gen_math_ops import *
@@ -775,16 +776,18 @@ def cast(x, dtype, name=None):
   with ops.name_scope(name, "Cast", [x]) as name:
     if isinstance(x, sparse_tensor.SparseTensor):
       values_cast = cast(x.values, base_type, name=name)
-      return sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
+      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
     else:
       # TODO(josh11b): If x is not already a Tensor, we could return
       # ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
       # allows some conversions that cast() can't do, e.g. casting numbers to
       # strings.
       x = ops.convert_to_tensor(x, name="x")
-      if x.dtype.base_dtype == base_type:
-        return x
-      return gen_math_ops.cast(x, base_type, name=name)
+      if x.dtype.base_dtype != base_type:
+        x = gen_math_ops.cast(x, base_type, name=name)
+    if x.dtype.is_complex and base_type.is_floating:
+      logging.warn("Casting complex to real discards imaginary part.")
+    return x


 @tf_export("saturate_cast")
diff --git a/tensorflow/python/ops/sets_impl.py b/tensorflow/python/ops/sets_impl.py
index b0eecd8a1e8..21e08d03d21 100644
--- a/tensorflow/python/ops/sets_impl.py
+++ b/tensorflow/python/ops/sets_impl.py
@@ -247,7 +247,7 @@ def set_difference(a, b, aminusb=True, validate_indices=True):
       #
       # collections.OrderedDict([
       #   ((0, 0, 0), 2),
-      #   ((0, 0, 1), 3),
+      #   ((0, 1, 0), 3),
       # ])
   ```
diff --git a/tensorflow/tools/test/upload_test_benchmarks.py b/tensorflow/tools/test/upload_test_benchmarks.py
index c0305751092..9c45359ee1b 100644
--- a/tensorflow/tools/test/upload_test_benchmarks.py
+++ b/tensorflow/tools/test/upload_test_benchmarks.py
@@ -89,6 +89,7 @@ import shutil

 from google.cloud import datastore
+from six import text_type


 def is_real_file(dirpath, fname):
diff --git a/third_party/sycl/sycl/BUILD.tpl b/third_party/sycl/sycl/BUILD.tpl
index 21b1a2bbf7d..b7e9aa8edb4 100755
--- a/third_party/sycl/sycl/BUILD.tpl
+++ b/third_party/sycl/sycl/BUILD.tpl
@@ -21,7 +21,7 @@ config_setting(
     name = "using_sycl_trisycl",
     define_values = {
         "using_sycl": "true",
-        "using_trisycl": "false",
+        "using_trisycl": "true",
     },
 )