Merge changes from github.
PiperOrigin-RevId: 190161440
parent dbea93d7f1
commit 0dadbfe118
@@ -40,7 +40,7 @@ _DEFAULT_CUDA_PATH = '/usr/local/cuda'
 _DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
 _DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
 'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
-_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/x86_64-linux-gnu'
+_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
 _TF_OPENCL_VERSION = '1.2'
 _DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
 _DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
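The new default derives the Debian/Ubuntu multiarch library directory from the host architecture instead of hard-coding x86_64. A minimal sketch of the substituted value, assuming only Python's standard `platform` module:

```python
import platform

# platform.machine() reports the hardware architecture, e.g. 'x86_64'
# on Intel/AMD Linux or 'aarch64' on 64-bit ARM.
default_tensorrt_path = '/usr/lib/%s-linux-gnu' % platform.machine()
print(default_tensorrt_path)  # e.g. /usr/lib/x86_64-linux-gnu
```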
@@ -42,7 +42,7 @@ def group_by_window(key_func,
 This transformation maps each consecutive element in a dataset to a key
 using `key_func` and groups the elements by key. It then applies
 `reduce_func` to at most `window_size_func(key)` elements matching the same
-key. All execpt the final window for each key will contain
+key. All except the final window for each key will contain
 `window_size_func(key)` elements; the final window may be smaller.

 You may provide either a constant `window_size` or a window size determined by
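As a rough usage sketch of the transformation this docstring describes, assuming the TF 1.x `tf.contrib.data` API and a constant `window_size`:

```python
import tensorflow as tf

# Group integers by parity and batch each group in windows of up to 4.
dataset = tf.data.Dataset.range(10)
dataset = dataset.apply(
    tf.contrib.data.group_by_window(
        key_func=lambda x: x % 2,                        # key: 0 for even, 1 for odd
        reduce_func=lambda key, window: window.batch(4),
        window_size=4))
# Every emitted batch except possibly the last one per key holds 4 elements.
```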
@@ -28,7 +28,7 @@ val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
 out = tf.identity(val, name="out")
 with tf.Session() as sess:
 tflite_model = tf.contrib.lite.toco_convert(sess.graph_def, [img], [out])
-open("test.tflite", "wb").write(tflite_modeL)
+open("test.tflite", "wb").write(tflite_model)
 ```

 **NOTE** Currently, the TOCO command will cause a fatal error to the Python
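For context, a self-contained sketch of the documented flow, assuming the TF 1.x `tf.contrib.lite.toco_convert` API; the placeholder name and shape here are illustrative only and not taken from the page above:

```python
import tensorflow as tf

# Hypothetical float input; the shape is chosen only for illustration.
img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
out = tf.identity(val, name="out")

with tf.Session() as sess:
  # Convert the GraphDef to a TFLite flatbuffer (TF 1.x contrib API).
  tflite_model = tf.contrib.lite.toco_convert(sess.graph_def, [img], [out])
open("test.tflite", "wb").write(tflite_model)
```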
@@ -64,11 +64,11 @@ class LogMessageFatal : public LogMessage {
 };

 #define _TF_LOG_INFO \
-  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::INFO)
+  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, ::tensorflow::INFO)
 #define _TF_LOG_WARNING \
-  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::WARNING)
+  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, ::tensorflow::WARNING)
 #define _TF_LOG_ERROR \
-  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, tensorflow::ERROR)
+  ::tensorflow::internal::LogMessage(__FILE__, __LINE__, ::tensorflow::ERROR)
 #define _TF_LOG_FATAL \
   ::tensorflow::internal::LogMessageFatal(__FILE__, __LINE__)

@@ -225,7 +225,7 @@ it will use the `FooBarFileSystem` implementation.
 Next, you must build a shared object containing this implementation. An example
 of doing so using bazel's `cc_binary` rule can be found
 [here](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/BUILD#L244),
-but you may use any build system to do so. See the section on @{$adding_an_op#build-the-op-library$building the op library} for similar
+but you may use any build system to do so. See the section on @{$adding_an_op#build_the_op_library$building the op library} for similar
 instructions.

 The result of building this target is a `.so` shared object file.
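Once that `.so` is built, the doc's next step is loading it at runtime. A minimal sketch, assuming the TF 1.x `tf.load_file_system_library` helper; the library file name below is hypothetical:

```python
import tensorflow as tf

# Hypothetical shared object produced by the cc_binary rule above.
tf.load_file_system_library("libfoo_bar_file_system.so")

# After loading, paths with the registered scheme behave like any other,
# e.g. tf.gfile.Exists("foobar://bucket/object").
```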
@@ -167,7 +167,7 @@ REGISTER_KERNEL_BUILDER(Name("TextLineReader").Device(DEVICE_CPU),
 ```

 The last step is to add the Python wrapper. You can either do this by
-@{$adding_an_op#building_the_op_library$compiling a dynamic library}
+@{$adding_an_op#build_the_op_library$compiling a dynamic library}
 or, if you are building TensorFlow from source, adding to `user_ops.py`.
 For the latter, you will import `tensorflow.python.ops.io_ops` in
 [`tensorflow/python/user_ops/user_ops.py`](https://www.tensorflow.org/code/tensorflow/python/user_ops/user_ops.py)
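If you take the dynamic-library route, the wrapper is typically obtained by loading the compiled op library. A sketch, assuming `tf.load_op_library` and a hypothetical library path:

```python
import tensorflow as tf

# Hypothetical path to the shared object built from the registered kernel.
reader_module = tf.load_op_library("./my_reader_ops.so")

# The loaded module exposes generated Python wrappers for the registered ops.
print([name for name in dir(reader_module) if not name.startswith("_")])
```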
@@ -198,9 +198,9 @@ Classifier"](#training_and_evaluating_the_cnn_mnist_classifier).
 ### Input Layer

 The methods in the `layers` module for creating convolutional and pooling layers
-for two-dimensional image data expect input tensors to have a shape of
-<code>[<em>batch_size</em>, <em>image_width</em>, <em>image_height</em>,
-<em>channels</em>]</code>, defined as follows:
+for two-dimensional image data expect input tensors to have a `channels_last` shape of
+<code>[<em>batch_size</em>, <em>image_height</em>, <em>image_width</em>, <em>channels</em>]</code>
+or a `channels_first` shape of <code>[<em>batch_size</em>, <em>channels</em>, <em>image_height</em>, <em>image_width</em>]</code>, defined as follows:

 * _`batch_size`_. Size of the subset of examples to use when performing
 gradient descent during training.
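A minimal sketch of a `channels_last` input layer in the tutorial's style; the placeholder is illustrative, with MNIST's 28x28 grayscale images arriving as flat vectors of 784 floats:

```python
import tensorflow as tf

# Flattened grayscale images, one 784-float row per example.
flat_pixels = tf.placeholder(tf.float32, shape=[None, 784])

# channels_last layout: [batch_size, image_height, image_width, channels].
input_layer = tf.reshape(flat_pixels, [-1, 28, 28, 1])

conv1 = tf.layers.conv2d(
    inputs=input_layer, filters=32, kernel_size=[5, 5],
    padding="same", activation=tf.nn.relu)
```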
@@ -889,6 +889,8 @@ class BaseSession(SessionInterface):
 Either a single value if `fetches` is a single graph element, or
 a list of values if `fetches` is a list, or a dictionary with the
 same keys as `fetches` if that is a dictionary (described above).
+Order in which `fetches` operations are evaluated inside the call
+is undefined.

 Raises:
 RuntimeError: If this `Session` is in an invalid state (e.g. has been
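The added docstring lines state that evaluation order within a single `run` call is unspecified. A small sketch of the structured-fetch forms the docstring describes:

```python
import tensorflow as tf

a = tf.constant(1)
b = tf.constant(2)

with tf.Session() as sess:
  # List of fetches -> list of values, in the order of the fetches list.
  print(sess.run([a, b]))            # [1, 2]
  # Dict of fetches -> dict with the same keys.
  print(sess.run({"a": a, "b": b}))  # {'a': 1, 'b': 2}
  # No guarantee is made about the order in which a and b are evaluated
  # inside either call.
```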
@@ -167,6 +167,7 @@ from tensorflow.python.ops import gen_math_ops
 from tensorflow.python.ops import gen_nn_ops
 from tensorflow.python.ops import gen_sparse_ops
 from tensorflow.python.ops import gen_spectral_ops
+from tensorflow.python.platform import tf_logging as logging
 # go/tf-wildcard-import
 # pylint: disable=wildcard-import
 from tensorflow.python.ops.gen_math_ops import *
@@ -775,16 +776,18 @@ def cast(x, dtype, name=None):
   with ops.name_scope(name, "Cast", [x]) as name:
     if isinstance(x, sparse_tensor.SparseTensor):
       values_cast = cast(x.values, base_type, name=name)
-      return sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
+      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
     else:
       # TODO(josh11b): If x is not already a Tensor, we could return
       # ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
       # allows some conversions that cast() can't do, e.g. casting numbers to
       # strings.
       x = ops.convert_to_tensor(x, name="x")
-      if x.dtype.base_dtype == base_type:
-        return x
-      return gen_math_ops.cast(x, base_type, name=name)
+      if x.dtype.base_dtype != base_type:
+        x = gen_math_ops.cast(x, base_type, name=name)
+    if x.dtype.is_complex and base_type.is_floating:
+      logging.warn("Casting complex to real discards imaginary part.")
+    return x


 @tf_export("saturate_cast")
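A sketch of the caller-visible effect of this change: casting a complex tensor to a floating type now logs a warning while still keeping only the real part. Assumes an ordinary TF 1.x session:

```python
import tensorflow as tf

z = tf.constant([1 + 2j, 3 - 1j], dtype=tf.complex64)

# With the change above, building this op logs
# "Casting complex to real discards imaginary part."
x = tf.cast(z, tf.float32)

with tf.Session() as sess:
  print(sess.run(x))  # [1. 3.]
```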
@@ -247,7 +247,7 @@ def set_difference(a, b, aminusb=True, validate_indices=True):
 #
 # collections.OrderedDict([
 #     ((0, 0, 0), 2),
-#     ((0, 0, 1), 3),
+#     ((0, 1, 0), 3),
 # ])
 ```

@@ -89,6 +89,7 @@ import shutil

 from six import text_type
 from google.cloud import datastore
+from six import text_type


 def is_real_file(dirpath, fname):
third_party/sycl/sycl/BUILD.tpl (vendored, 2 lines changed)
@@ -21,7 +21,7 @@ config_setting(
     name = "using_sycl_trisycl",
     define_values = {
         "using_sycl": "true",
-        "using_trisycl": "false",
+        "using_trisycl": "true",
     },
 )
