Apply tf1->tf2 name replacements to docstrings and comments in tensorflow.

No code changes, only docstrings and comments (I checked).

PiperOrigin-RevId: 243885306
This commit is contained in:
parent 68ec4096cb
commit 1bde2c4ce9
tensorflow
compiler/tests
python
autograph
client
eager
backprop.py
context.py
def_function.py
execution_callbacks.py
function.py
graph_only_ops.py
remote.py
wrap_function.py
feature_column
grappler
layers
module
platform
profiler
summary
tpu
util
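The hunks below are mechanical, text-only substitutions that all follow the same pattern. As a rough sketch of that pattern (the mapping table and helper below are illustrative, not part of the commit; every name pair in the table does appear in the hunks that follow):

```python
import re

# Illustrative only: a hypothetical table of the tf1 -> tf2-compatible
# renames this commit applies to docstrings and comments. No executable
# code is changed by the commit itself.
TF1_TO_TF2_DOC_RENAMES = {
    "tf.Session": "tf.compat.v1.Session",
    "tf.get_variable": "tf.compat.v1.get_variable",
    "tf.get_default_session": "tf.compat.v1.get_default_session",
    "tf.parse_example": "tf.io.parse_example",
    "tf.random_uniform": "tf.random.uniform",
    "tf.random_normal": "tf.random.normal",
    "tf.truncated_normal": "tf.random.truncated_normal",
    "tf.log": "tf.math.log",
    "tf.py_func": "tf.compat.v1.py_func",
}


def apply_doc_renames(text: str) -> str:
    """Rewrites tf1 names in a docstring or comment (hypothetical helper)."""
    # Longest keys first, and a boundary lookahead so that e.g. the key
    # "tf.log" does not clobber "tf.logging", and dotted continuations like
    # "tf.Session.run" are left alone -- matching the hunks below.
    for old, new in sorted(TF1_TO_TF2_DOC_RENAMES.items(),
                           key=lambda kv: len(kv[0]), reverse=True):
        text = re.sub(re.escape(old) + r"(?![\w.])", new, text)
    return text
```

For example, `apply_doc_renames("sess = tf.Session()")` returns `"sess = tf.compat.v1.Session()"`, mirroring the session.py hunks below.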
@@ -454,7 +454,7 @@ class PoolGradTest(xla_test.XLATestCase):
 """Verifies the output values of the pooling function.

 Args:
-  pool_func: Pooling function to be called, e.g., tf.nn.max_pool
+  pool_func: Pooling function to be called, e.g., tf.nn.max_pool2d
   pool_grad_func: Corresponding pooling gradient function.
   input_sizes: Input tensor dimensions.
   ksize: The kernel size dimensions
@@ -21,10 +21,10 @@ arguments passed to functions, and does not account for indirectly modified
 state.

 Example:
-y = tf.layers.dense(x) # Creates TF variable 'foo'
+y = tf.compat.v1.layers.dense(x) # Creates TF variable 'foo'
 loss = loss(y)
 opt.minimize(loss) # indirectly affects 'foo'
-z = tf.get_variable('foo') # Indirectly affects `loss` and 'foo'
+z = tf.compat.v1.get_variable('foo') # Indirectly affects `loss` and 'foo'
 # Here, `loss` can be guarded. But `z` cannot.

 # TODO(mdan): We should probably define a safe mode where we guard everything.
@@ -135,7 +135,7 @@ class SideEffectGuardTransformer(converter.Base):
 # Patterns of single function calls, like:
 # opt.minimize(loss)
 # or:
-# tf.py_func(...)
+# tf.compat.v1.py_func(...)

 # First, attempt to gate future evaluation of args. If that's not
 # possible, gate all remaining statements (and that may fail too, see
@@ -151,9 +151,9 @@ class SideEffectGuardTransformer(converter.Base):

 # TODO(mdan): Include all arguments which depended on guarded_args too.
 # For example, the following will still cause a race:
-# tf.assign(a, a + 1)
+# tf.compat.v1.assign(a, a + 1)
 # b = a + 1
-# tf.assign(a, a + 1) # Control deps here should include `b`
+# tf.compat.v1.assign(a, a + 1) # Control deps here should include `b`
 # c = b + 1
 # Or maybe we should just raise an "unsafe assign" error?

@@ -200,7 +200,7 @@ def rewrite_tf_runtime_error(error, source_map):
 """Rewrites TensorFlow runtime errors raised by ops created in AG code.

 Args:
-  error: tf.OpError
+  error: tf.errors.OpError
   source_map: Dict[origin_info.LineLocation, origin_info.OriginInfo]

 Returns:
@@ -774,14 +774,14 @@ class BaseSession(SessionInterface):

 ```python
 c = tf.constant(..)
-sess = tf.Session()
+sess = tf.compat.v1.Session()

 with sess.as_default():
-  assert tf.get_default_session() is sess
+  assert tf.compat.v1.get_default_session() is sess
   print(c.eval())
 ```

-To get the current default session, use `tf.get_default_session`.
+To get the current default session, use `tf.compat.v1.get_default_session`.

 *N.B.* The `as_default` context manager *does not* close the
 session when you exit the context, and you must close the session
@@ -789,7 +789,7 @@ class BaseSession(SessionInterface):

 ```python
 c = tf.constant(...)
-sess = tf.Session()
+sess = tf.compat.v1.Session()
 with sess.as_default():
   print(c.eval())
 # ...
@@ -799,7 +799,7 @@ class BaseSession(SessionInterface):
 sess.close()
 ```

-Alternatively, you can use `with tf.Session():` to create a
+Alternatively, you can use `with tf.compat.v1.Session():` to create a
 session that is automatically closed on exiting the context,
 including when an uncaught exception is raised.

@@ -810,9 +810,10 @@ class BaseSession(SessionInterface):

 *N.B.* Entering a `with sess.as_default():` block does not affect
 the current default graph. If you are using multiple graphs, and
-`sess.graph` is different from the value of `tf.get_default_graph`,
-you must explicitly enter a `with sess.graph.as_default():` block
-to make `sess.graph` the default graph.
+`sess.graph` is different from the value of
+`tf.compat.v1.get_default_graph`, you must explicitly enter a
+`with sess.graph.as_default():` block to make `sess.graph` the default
+graph.

 Returns:
   A context manager using this session as the default session.
@@ -838,7 +839,7 @@ class BaseSession(SessionInterface):
   value of that tensor.
 * A `tf.SparseTensor`.
   The corresponding fetched value will be a
-  `tf.SparseTensorValue`
+  `tf.compat.v1.SparseTensorValue`
   containing the value of that sparse tensor.
 * A `get_tensor_handle` op. The corresponding fetched value will be a
   numpy ndarray containing the handle of that tensor.
@@ -878,12 +879,12 @@ class BaseSession(SessionInterface):
   value may be a Python scalar, string, list, or numpy ndarray
   that can be converted to the same `dtype` as that
   tensor. Additionally, if the key is a
-  `tf.placeholder`, the shape of
+  `tf.compat.v1.placeholder`, the shape of
   the value will be checked for compatibility with the placeholder.
 * If the key is a
   `tf.SparseTensor`,
   the value should be a
-  `tf.SparseTensorValue`.
+  `tf.compat.v1.SparseTensorValue`.
 * If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
   should be a nested tuple with the same structure that maps to their
   corresponding values as above.
@@ -1178,11 +1179,12 @@ class BaseSession(SessionInterface):
 feed_list: (Optional.) A list of `feed_dict` keys. See
   `tf.Session.run` for details of the allowable feed key types.
 accept_options: (Optional.) If `True`, the returned `Callable` will be
-  able to accept `tf.RunOptions` and `tf.RunMetadata` as optional
-  keyword arguments `options` and `run_metadata`, respectively, with
-  the same syntax and semantics as `tf.Session.run`, which is useful
-  for certain use cases (profiling and debugging) but will result in
-  measurable slowdown of the `Callable`'s performance. Default: `False`.
+  able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
+  as optional keyword arguments `options` and `run_metadata`,
+  respectively, with the same syntax and semantics as `tf.Session.run`,
+  which is useful for certain use cases (profiling and debugging) but
+  will result in measurable slowdown of the `Callable`'s
+  performance. Default: `False`.

 Returns:
   A function that when called will execute the step defined by
@@ -1489,15 +1491,15 @@ class Session(BaseSession):
 c = a * b

 # Launch the graph in a session.
-sess = tf.Session()
+sess = tf.compat.v1.Session()

 # Evaluate the tensor `c`.
 print(sess.run(c))
 ```

 A session may own resources, such as
-`tf.Variable`, `tf.QueueBase`,
-and `tf.ReaderBase`. It is important to release
+`tf.Variable`, `tf.queue.QueueBase`,
+and `tf.compat.v1.ReaderBase`. It is important to release
 these resources when they are no longer required. To do this, either
 invoke the `tf.Session.close` method on the session, or use
 the session as a context manager. The following two examples are
@@ -1505,12 +1507,12 @@ class Session(BaseSession):

 ```python
 # Using the `close()` method.
-sess = tf.Session()
+sess = tf.compat.v1.Session()
 sess.run(...)
 sess.close()

 # Using the context manager.
-with tf.Session() as sess:
+with tf.compat.v1.Session() as sess:
   sess.run(...)
 ```

@@ -1524,8 +1526,9 @@ class Session(BaseSession):
 ```python
 # Launch the graph in a session that allows soft device placement and
 # logs the placement decisions.
-sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
-                                        log_device_placement=True))
+sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
+    allow_soft_placement=True,
+    log_device_placement=True))
 ```
 """

@@ -1660,7 +1663,7 @@ class InteractiveSession(BaseSession):
 For example:

 ```python
-sess = tf.InteractiveSession()
+sess = tf.compat.v1.InteractiveSession()
 a = tf.constant(5.0)
 b = tf.constant(6.0)
 c = a * b
@@ -1677,7 +1680,7 @@ class InteractiveSession(BaseSession):
 a = tf.constant(5.0)
 b = tf.constant(6.0)
 c = a * b
-with tf.Session():
+with tf.compat.v1.Session():
   # We can also use 'c.eval()' here.
   print(c.eval())
 ```
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Tests and benchmarks for interacting with the `tf.Session`."""
+"""Tests and benchmarks for interacting with the `tf.compat.v1.Session`."""

 from __future__ import absolute_import
 from __future__ import division
@@ -34,7 +34,7 @@ from tensorflow.python.training import server_lib


 class SessionBenchmark(test.Benchmark):
-  """Tests and benchmarks for interacting with the `tf.Session`."""
+  """Tests and benchmarks for interacting with the `tf.compat.v1.Session`."""

   def _benchmarkFeed(self, name, target, size, iters):
     """Runs a microbenchmark to measure the cost of feeding a tensor.
@@ -161,7 +161,7 @@ def implicit_val_and_grad(f):
 Example:

 ```python
-dense_layer = tf.layers.Dense(1)
+dense_layer = tf.compat.v1.layers.Dense(1)
 def loss(x, y):
   return tf.reduce_sum(tf.square(dense_layer(x) - y))

@@ -175,7 +175,7 @@ def implicit_val_and_grad(f):
 print('Value of loss: %s' % value)

 # Apply the gradients to Variables.
-optimizer = tf.train.GradientDescentOptimizer(0.1)
+optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
 optimizer.apply_gradients(grads_and_vars)
 ```

@@ -235,7 +235,7 @@ def implicit_grad(f):
 Example:

 ```python
-dense_layer = tf.layers.Dense(1)
+dense_layer = tf.compat.v1.layers.Dense(1)
 def loss(x, y):
   return tf.reduce_sum(tf.square(dense_layer(x) - y))

@@ -248,7 +248,7 @@ def implicit_grad(f):
 grads_and_vars = grad_fn(x, y)

 # Apply the gradients to Variables.
-optimizer = tf.train.GradientDescentOptimizer(0.1)
+optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
 optimizer.apply_gradients(grads_and_vars)
 ```

@@ -668,9 +668,9 @@ class GradientTape(object):
 Operations are recorded if they are executed within this context manager and
 at least one of their inputs is being "watched".

-Trainable variables (created by `tf.Variable` or `tf.get_variable`, where
-`trainable=True` is default in both cases) are automatically watched. Tensors
-can be manually watched by invoking the `watch` method on this context
+Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,
+where `trainable=True` is default in both cases) are automatically watched.
+Tensors can be manually watched by invoking the `watch` method on this context
 manager.

 For example, consider the function `y = x * x`. The gradient at `x = 3.0` can
@@ -1339,9 +1339,9 @@ def internal_operation_seed():
 def executing_eagerly():
   """Returns True if the current thread has eager execution enabled.

-  Eager execution is typically enabled via `tf.enable_eager_execution`,
-  but may also be enabled within the context of a Python function via
-  tf.contrib.eager.py_func.
+  Eager execution is typically enabled via
+  `tf.compat.v1.enable_eager_execution`, but may also be enabled within the
+  context of a Python function via tf.contrib.eager.py_func.
   """
   if context_safe() is None:
     return default_execution_mode == EAGER_MODE
@@ -1411,7 +1411,7 @@ def device(name):
 with tf.device('gpu:0'):
   with tf.device('cpu:0'):
     shape = tf.constant([], dtype=tf.int32)
-  x = tf.truncated_normal(shape, tf.float32)
+  x = tf.random.truncated_normal(shape, tf.float32)
 ```
 will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
 operation runs on GPU 0.
@@ -796,8 +796,8 @@ def function(func=None,
 def g(x):
   for i in x:
     print(i) # Works
-    tf.assign(v, i) # Works
-    tf.py_func(lambda i: l.append(i))(i) # Works
+    tf.compat.v1.assign(v, i) # Works
+    tf.compat.v1.py_func(lambda i: l.append(i))(i) # Works
     l.append(i) # Caution! Doesn't work.
 ```

@@ -823,7 +823,7 @@ def function(func=None,
 @tf.function
 def f(x):
   c.assign_add(1)
-  return x + tf.to_float(c)
+  return x + tf.compat.v1.to_float(c)

 assert int(c) == 0
 assert f(1.0) == 2.0
@@ -837,7 +837,7 @@ def function(func=None,
 ```python
 class Dense(object):
   def __init__(self):
-    self.W = tf.Variable(tf.glorot_uniform_initializer()((10, 10)))
+    self.W = tf.Variable(tf.compat.v1.glorot_uniform_initializer()((10, 10)))
     self.b = tf.Variable(tf.zeros(10))

   @tf.function
@@ -846,7 +846,7 @@ def function(func=None,

 d1 = Dense()
 d2 = Dense()
-x = tf.random_uniform((10, 10))
+x = tf.random.uniform((10, 10))
 # d1 and d2 are using distinct variables
 assert not (d1.compute(x).numpy() == d2.compute(x).numpy()).all()
 ```
@@ -938,7 +938,7 @@ def function(func=None,
 since a particular random value generated by the `np.random.randn` call will
 be inserted in the traced/staged TensorFlow graph as a constant. In this
 particular example, replacing `np.random.randn(5, 5)` with
-`tf.random_normal((5, 5))` will result in the same behavior for `add_noise()`
+`tf.random.normal((5, 5))` will result in the same behavior for `add_noise()`
 and `traced()`.

 _Python Side-Effects_
@@ -952,7 +952,7 @@ def function(func=None,

 The same is true if code with Python side effects is used inside control flow,
 such as a loop. If your code uses side effects that are not intended to
-control graph construction, wrap them inside `tf.py_func`.
+control graph construction, wrap them inside `tf.compat.v1.py_func`.

 Args:
   func: function to be compiled. If `func` is None, returns a decorator that
@@ -40,7 +40,7 @@ class ExecutionCallback(enum.Enum):
 IGNORE: take no action.
 PRINT: print a warning to `stdout`.
 RAISE: raise an error (e.g. `InfOrNanError`).
-WARN: print a warning using `tf.logging.warn`.
+WARN: print a warning using `tf.compat.v1.logging.warn`.
 """

 IGNORE = "ignore"
@@ -353,10 +353,10 @@ def errstate(inf_or_nan=None):

 Example:
 ```
-c = tf.log(0.) # -inf
+c = tf.math.log(0.) # -inf

 with errstate(inf_or_nan=ExecutionCallback.RAISE):
-  tf.log(0.) # <-- Raises InfOrNanError.
+  tf.math.log(0.) # <-- Raises InfOrNanError.
 ```

 Args:
@@ -1702,7 +1702,7 @@ def defun(func=None,
 ```python
 import tensorflow as tf

-tf.enable_eager_execution()
+tf.compat.v1.enable_eager_execution()

 # A simple example.
 def f(x, y):
@@ -1749,7 +1749,7 @@ def defun(func=None,
 model(x, training=False) # executes a graph, without dropout

 # `defun`-compiled functions are differentiable.
-optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
+optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01)
 with tf.GradientTape() as tape:
   outputs = model(x)
 gradient = tape.gradient(outputs, model.trainable_variables)
@@ -1771,8 +1771,8 @@ def defun(func=None,
 By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph
 for every unique sequence of the shapes and dtypes of Tensor arguments and
 the values of Python objects it is invoked with. For example, calling
-`F(tf.random_uniform([2])` will execute a different graph than
-`F(tf.random_uniform([3])` because the two inputs have different shapes.
+`F(tf.random.uniform([2])` will execute a different graph than
+`F(tf.random.uniform([3])` because the two inputs have different shapes.
 The first time that `F(*args, **kwargs)` is called with a particular sequence
 of Tensor shapes and dtypes and Python values, it constructs a graph by
 tracing the execution of `f(*args, **kwargs)`; this graph is bound to an
@@ -1809,15 +1809,15 @@ def defun(func=None,
 ...

 # Note how the third dimension of the first input can vary freely.
-words = tf.random_uniform(([50, 300, 10])
-second_input = tf.random_uniform([300, 100])
+words = tf.random.uniform(([50, 300, 10])
+second_input = tf.random.uniform([300, 100])
 my_sequence_model(words, second_input)

-words = tf.random_uniform(([50, 300, 20])
+words = tf.random.uniform(([50, 300, 20])
 my_sequence_model(words, second_input)

 # Passing an input with an incompatible shape will raise an error.
-words = tf.random_uniform(([50, 100, 20])
+words = tf.random.uniform(([50, 100, 20])
 my_sequence_model(words, second_input) # <---- This will raise an error.

 ```
@@ -1839,7 +1839,7 @@ def defun(func=None,
 import tensorflow as tf
 import numpy as np

-tf.enable_eager_execution()
+tf.compat.v1.enable_eager_execution()

 def add_noise():
   return tf.eye(5) + np.random.randn(5, 5)
@@ -1849,7 +1849,7 @@ def defun(func=None,
 `compiled = tf.contrib.eager.defun(add_noise)` will return the same value
 every time it is called, since a particular random offset generated by NumPy
 will be inserted into the graph as a TensorFlow constant. The solution is to
-replace the call to `np.random.randn` with `tf.random_normal((5, 5))`.
+replace the call to `np.random.randn` with `tf.random.normal((5, 5))`.

 _Python Side-Effects_

@@ -1871,7 +1871,7 @@ def defun(func=None,
 ```python
 import tensorflow as tf

-tf.enable_eager_execution()
+tf.compat.v1.enable_eager_execution()

 @tf.contrib.eager.defun
 def lossy_matmul(W, x, training=True):
@@ -1880,8 +1880,8 @@ def defun(func=None,
   outputs = tf.nn.dropout(outputs, keep_probability=0.2)
   return outputs

-W = tf.random_normal((3, 5))
-x = tf.random_normal((5, 1))
+W = tf.random.normal((3, 5))
+x = tf.random.normal((5, 1))

 # Executes a graph that applies dropout.
 lossy_outputs = lossy_matmul(W, x, training=True)
@@ -1923,7 +1923,7 @@ def defun(func=None,
 ```python
 import tensorflow as tf

-tf.enable_eager_execution()
+tf.compat.v1.enable_eager_execution()

 def fn():
   x = tf.Variable(0.0)
|
||||
|
||||
|
||||
def graph_placeholder(dtype, shape, name=None):
|
||||
"""Graph-only version of tf.placeholder(), for internal use only."""
|
||||
"""Graph-only version of tf.compat.v1.placeholder(), for internal use only."""
|
||||
dtype = dtype.base_dtype
|
||||
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
|
||||
if isinstance(shape, (list, tuple)):
|
||||
|
@ -38,7 +38,7 @@ def connect_to_remote_host(remote_host=None, job_name="worker"):
|
||||
follows:
|
||||
```python
|
||||
# Enable eager execution, and connect to the remote host.
|
||||
tf.enable_eager_execution()
|
||||
tf.compat.v1.enable_eager_execution()
|
||||
tf.contrib.eager.connect_to_remote_host("exampleaddr.com:9876")
|
||||
|
||||
with ops.device("job:worker/replica:0/task:1/device:CPU:0"):
|
||||
|
@ -246,12 +246,12 @@ class WrappedGraph(object):
|
||||
|
||||
```
|
||||
def add_v1(x):
|
||||
with tf.compat.v1.variable_scope('vars', reuse=tf.AUTO_REUSE):
|
||||
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
|
||||
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
|
||||
return v + x
|
||||
|
||||
def increment_var_v1(x):
|
||||
with tf.compat.v1.variable_scope('vars', reuse=tf.AUTO_REUSE):
|
||||
with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
|
||||
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
|
||||
return v.assign_add(x)
|
||||
|
||||
@ -293,9 +293,10 @@ class WrappedGraph(object):
|
||||
"""Wraps a TF 1.X function and returns an eager-compatible function.
|
||||
|
||||
All functions wrapped in the same `WrappedGraph` will have access to the
|
||||
same graph (`tf.get_default_graph` to get the graph object within a
|
||||
function, or `WrappedGraph.graph` to get the graph outside a function).
|
||||
Variables created within the function will be added to the `variables` list.
|
||||
same graph (`tf.compat.v1.get_default_graph` to get the graph object
|
||||
within a function, or `WrappedGraph.graph` to get the graph outside a
|
||||
function). Variables created within the function will be added to the
|
||||
`variables` list.
|
||||
|
||||
Function inputs: All inputs to the function must be tensors (nested ok),
|
||||
with their shapes and dtypes defined in the `signature` argument.
|
||||
|
@ -250,11 +250,11 @@ def input_layer(features,
|
||||
keywords_embedded = embedding_column(
|
||||
categorical_column_with_hash_bucket("keywords", 10K), dimensions=16)
|
||||
columns = [price, keywords_embedded, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
for units in [128, 64, 32]:
|
||||
dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu)
|
||||
prediction = tf.layers.dense(dense_tensor, 1)
|
||||
dense_tensor = tf.compat.v1.layers.dense(dense_tensor, units, tf.nn.relu)
|
||||
prediction = tf.compat.v1.layers.dense(dense_tensor, 1)
|
||||
```
|
||||
|
||||
Args:
|
||||
@ -404,7 +404,7 @@ def linear_model(features,
|
||||
keywords = categorical_column_with_hash_bucket("keywords", 10K)
|
||||
keywords_price = crossed_column('keywords', price_buckets, ...)
|
||||
columns = [price_buckets, keywords, keywords_price ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -719,7 +719,7 @@ def _transform_features(features, feature_columns):
|
||||
source_column=numeric_column("price"), boundaries=[...])
|
||||
|
||||
columns = [crosses_a_x_b, price_buckets]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
transformed = transform_features(features=features, feature_columns=columns)
|
||||
|
||||
assertCountEqual(columns, transformed.keys())
|
||||
@ -750,7 +750,8 @@ def _transform_features(features, feature_columns):
|
||||
def make_parse_example_spec(feature_columns):
|
||||
"""Creates parsing spec dictionary from input feature_columns.
|
||||
|
||||
The returned dictionary can be used as arg 'features' in `tf.parse_example`.
|
||||
The returned dictionary can be used as arg 'features' in
|
||||
`tf.io.parse_example`.
|
||||
|
||||
Typical usage example:
|
||||
|
||||
@ -764,7 +765,7 @@ def make_parse_example_spec(feature_columns):
|
||||
|
||||
feature_columns = set(
|
||||
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
|
||||
features = tf.parse_example(
|
||||
features = tf.io.parse_example(
|
||||
serialized=serialized_examples,
|
||||
features=make_parse_example_spec(feature_columns))
|
||||
```
|
||||
@ -833,7 +834,7 @@ def _embedding_column(categorical_column,
|
||||
|
||||
label_column = ...
|
||||
def input_fn():
|
||||
features = tf.parse_example(
|
||||
features = tf.io.parse_example(
|
||||
..., features=make_parse_example_spec(columns + [label_column]))
|
||||
labels = features.pop(label_column.name)
|
||||
return features, labels
|
||||
@ -866,8 +867,8 @@ def _embedding_column(categorical_column,
|
||||
`tf.embedding_lookup_sparse`.
|
||||
initializer: A variable initializer function to be used in embedding
|
||||
variable initialization. If not specified, defaults to
|
||||
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
|
||||
`1/sqrt(dimension)`.
|
||||
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
|
||||
standard deviation `1/sqrt(dimension)`.
|
||||
ckpt_to_load_from: String representing checkpoint name/pattern from which to
|
||||
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
|
||||
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
|
||||
@ -934,13 +935,13 @@ def _numeric_column(key,
|
||||
```python
|
||||
price = numeric_column('price')
|
||||
columns = [price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
|
||||
# or
|
||||
bucketized_price = bucketized_column(price, boundaries=[...])
|
||||
columns = [bucketized_price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -955,7 +956,7 @@ def _numeric_column(key,
|
||||
default_value: A single value compatible with `dtype` or an iterable of
|
||||
values compatible with `dtype` which the column takes on during
|
||||
`tf.Example` parsing if data is missing. A default value of `None` will
|
||||
cause `tf.parse_example` to fail if an example does not contain this
|
||||
cause `tf.io.parse_example` to fail if an example does not contain this
|
||||
column. If a single value is provided, the same value will be applied as
|
||||
the default value for every item. If an iterable of values is provided,
|
||||
the shape of the `default_value` should be equal to the given `shape`.
|
||||
@ -1028,12 +1029,12 @@ def _bucketized_column(source_column, boundaries):
|
||||
price = numeric_column('price')
|
||||
bucketized_price = bucketized_column(price, boundaries=[...])
|
||||
columns = [bucketized_price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
|
||||
# or
|
||||
columns = [bucketized_price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1047,7 +1048,7 @@ def _bucketized_column(source_column, boundaries):
|
||||
# 'keywords' is a string feature.
|
||||
price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)
|
||||
columns = [price_x_keywords, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1101,13 +1102,13 @@ def _categorical_column_with_hash_bucket(key,
|
||||
```python
|
||||
keywords = categorical_column_with_hash_bucket("keywords", 10K)
|
||||
columns = [keywords, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
|
||||
# or
|
||||
keywords_embedded = embedding_column(keywords, 16)
|
||||
columns = [keywords_embedded, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1168,7 +1169,7 @@ def _categorical_column_with_vocabulary_file(key,
|
||||
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
|
||||
num_oov_buckets=5)
|
||||
columns = [states, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1183,7 +1184,7 @@ def _categorical_column_with_vocabulary_file(key,
|
||||
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
|
||||
default_value=0)
|
||||
columns = [states, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1191,7 +1192,7 @@ def _categorical_column_with_vocabulary_file(key,
|
||||
|
||||
```python
|
||||
columns = [embedding_column(states, 3),...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1285,7 +1286,7 @@ def _categorical_column_with_vocabulary_list(key,
|
||||
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
|
||||
num_oov_buckets=2)
|
||||
columns = [colors, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1299,7 +1300,7 @@ def _categorical_column_with_vocabulary_list(key,
|
||||
colors = categorical_column_with_vocabulary_list(
|
||||
key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
|
||||
columns = [colors, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1307,7 +1308,7 @@ def _categorical_column_with_vocabulary_list(key,
|
||||
|
||||
```python
|
||||
columns = [embedding_column(colors, 3),...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1397,7 +1398,7 @@ def _categorical_column_with_identity(key, num_buckets, default_value=None):
|
||||
video_id = categorical_column_with_identity(
|
||||
key='video_id', num_buckets=1000000, default_value=0)
|
||||
columns = [video_id, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1405,7 +1406,7 @@ def _categorical_column_with_identity(key, num_buckets, default_value=None):
|
||||
|
||||
```python
|
||||
columns = [embedding_column(video_id, 9),...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1454,7 +1455,7 @@ def _indicator_column(categorical_column):
|
||||
name = indicator_column(categorical_column_with_vocabulary_list(
|
||||
'name', ['bob', 'george', 'wanda'])
|
||||
columns = [name, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
|
||||
dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
|
||||
@ -1517,7 +1518,7 @@ def _weighted_categorical_column(categorical_column,
|
||||
weighted_column = weighted_categorical_column(
|
||||
categorical_column=categorical_column, weight_feature_key='frequencies')
|
||||
columns = [weighted_column, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1593,7 +1594,7 @@ def _crossed_column(keys, hash_bucket_size, hash_key=None):
|
||||
```python
|
||||
keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
|
||||
columns = [keywords_x_doc_terms, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1604,7 +1605,7 @@ def _crossed_column(keys, hash_bucket_size, hash_key=None):
|
||||
'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
|
||||
keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
|
||||
columns = [keywords_x_doc_terms, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1619,7 +1620,7 @@ def _crossed_column(keys, hash_bucket_size, hash_key=None):
|
||||
bucketized_price = bucketized_column(price, boundaries=[...])
|
||||
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
|
||||
columns = [vertical_id_x_price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1785,17 +1786,17 @@ class _FeatureColumn(object):
|
||||
def _parse_example_spec(self):
|
||||
"""Returns a `tf.Example` parsing spec as dict.
|
||||
|
||||
It is used for get_parsing_spec for `tf.parse_example`. Returned spec is a
|
||||
dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
|
||||
supported objects. Please check documentation of `tf.parse_example` for all
|
||||
supported spec objects.
|
||||
It is used for get_parsing_spec for `tf.io.parse_example`. Returned spec is
|
||||
a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
|
||||
supported objects. Please check documentation of `tf.io.parse_example` for
|
||||
all supported spec objects.
|
||||
|
||||
Let's say a Feature column depends on raw feature ('raw') and another
|
||||
`_FeatureColumn` (input_fc). One possible implementation of
|
||||
_parse_example_spec is as follows:
|
||||
|
||||
```python
|
||||
spec = {'raw': tf.FixedLenFeature(...)}
|
||||
spec = {'raw': tf.io.FixedLenFeature(...)}
|
||||
spec.update(input_fc._parse_example_spec)
|
||||
return spec
|
||||
```
|
||||
@ -1945,7 +1946,7 @@ class _CategoricalColumn(_FeatureColumn):
|
||||
weight_collections: List of graph collections to which variables (if any
|
||||
will be created) are added.
|
||||
trainable: If `True` also add variables to the graph collection
|
||||
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.get_variable`).
|
||||
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.compat.v1.get_variable`).
|
||||
"""
|
||||
pass
|
||||
|
||||
@ -3126,7 +3127,7 @@ def _verify_static_batch_size_equality(tensors, columns):
|
||||
Raises:
|
||||
ValueError: if one of the tensors has a variant batch size
|
||||
"""
|
||||
# bath_size is a tf.Dimension object.
|
||||
# bath_size is a tf.compat.v1.Dimension object.
|
||||
expected_batch_size = None
|
||||
for i in range(0, len(tensors)):
|
||||
if tensors[i].shape.dims[0].value is not None:
|
||||
|
@ -400,11 +400,11 @@ class DenseFeatures(_BaseFeaturesLayer):
|
||||
columns = [price, keywords_embedded, ...]
|
||||
feature_layer = DenseFeatures(columns)
|
||||
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = feature_layer(features)
|
||||
for units in [128, 64, 32]:
|
||||
dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu)
|
||||
prediction = tf.layers.dense(dense_tensor, 1).
|
||||
dense_tensor = tf.compat.v1.layers.dense(dense_tensor, units, tf.nn.relu)
|
||||
prediction = tf.compat.v1.layers.dense(dense_tensor, 1).
|
||||
```
|
||||
"""
|
||||
|
||||
@ -607,7 +607,7 @@ class LinearModel(training.Model):
|
||||
columns = [price_buckets, keywords, keywords_price ...]
|
||||
linear_model = LinearLayer(columns)
|
||||
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
prediction = linear_model(features)
|
||||
```
|
||||
"""
|
||||
@ -725,7 +725,7 @@ def _transform_features_v2(features, feature_columns, state_manager):
|
||||
source_column=numeric_column("price"), boundaries=[...])
|
||||
|
||||
columns = [crosses_a_x_b, price_buckets]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
transformed = transform_features(features=features, feature_columns=columns)
|
||||
|
||||
assertCountEqual(columns, transformed.keys())
|
||||
@ -757,7 +757,8 @@ def _transform_features_v2(features, feature_columns, state_manager):
|
||||
def make_parse_example_spec_v2(feature_columns):
|
||||
"""Creates parsing spec dictionary from input feature_columns.
|
||||
|
||||
The returned dictionary can be used as arg 'features' in `tf.parse_example`.
|
||||
The returned dictionary can be used as arg 'features' in
|
||||
`tf.io.parse_example`.
|
||||
|
||||
Typical usage example:
|
||||
|
||||
@ -771,7 +772,7 @@ def make_parse_example_spec_v2(feature_columns):
|
||||
|
||||
feature_columns = set(
|
||||
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
|
||||
features = tf.parse_example(
|
||||
features = tf.io.parse_example(
|
||||
serialized=serialized_examples,
|
||||
features=make_parse_example_spec(feature_columns))
|
||||
```
|
||||
@ -840,7 +841,7 @@ def embedding_column(categorical_column,
|
||||
|
||||
label_column = ...
|
||||
def input_fn():
|
||||
features = tf.parse_example(
|
||||
features = tf.io.parse_example(
|
||||
..., features=make_parse_example_spec(columns + [label_column]))
|
||||
labels = features.pop(label_column.name)
|
||||
return features, labels
|
||||
@ -873,8 +874,8 @@ def embedding_column(categorical_column,
|
||||
`tf.embedding_lookup_sparse`.
|
||||
initializer: A variable initializer function to be used in embedding
|
||||
variable initialization. If not specified, defaults to
|
||||
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
|
||||
`1/sqrt(dimension)`.
|
||||
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
|
||||
standard deviation `1/sqrt(dimension)`.
|
||||
ckpt_to_load_from: String representing checkpoint name/pattern from which to
|
||||
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
|
||||
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
|
||||
@ -957,7 +958,7 @@ def shared_embedding_columns(categorical_columns,
|
||||
|
||||
label_column = ...
|
||||
def input_fn():
|
||||
features = tf.parse_example(
|
||||
features = tf.io.parse_example(
|
||||
..., features=make_parse_example_spec(columns + [label_column]))
|
||||
labels = features.pop(label_column.name)
|
||||
return features, labels
|
||||
@ -996,8 +997,8 @@ def shared_embedding_columns(categorical_columns,
|
||||
`tf.embedding_lookup_sparse`.
|
||||
initializer: A variable initializer function to be used in embedding
|
||||
variable initialization. If not specified, defaults to
|
||||
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
|
||||
`1/sqrt(dimension)`.
|
||||
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
|
||||
standard deviation `1/sqrt(dimension)`.
|
||||
shared_embedding_collection_name: Optional name of the collection where
|
||||
shared embedding weights are added. If not given, a reasonable name will
|
||||
be chosen based on the names of `categorical_columns`. This is also used
|
||||
@ -1130,7 +1131,7 @@ def shared_embedding_columns_v2(categorical_columns,
|
||||
|
||||
label_column = ...
|
||||
def input_fn():
|
||||
features = tf.parse_example(
|
||||
features = tf.io.parse_example(
|
||||
..., features=make_parse_example_spec(columns + [label_column]))
|
||||
labels = features.pop(label_column.name)
|
||||
return features, labels
|
||||
@ -1169,8 +1170,8 @@ def shared_embedding_columns_v2(categorical_columns,
|
||||
`tf.embedding_lookup_sparse`.
|
||||
initializer: A variable initializer function to be used in embedding
|
||||
variable initialization. If not specified, defaults to
|
||||
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
|
||||
`1/sqrt(dimension)`.
|
||||
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and standard
|
||||
deviation `1/sqrt(dimension)`.
|
||||
shared_embedding_collection_name: Optional collective name of these columns.
|
||||
If not given, a reasonable name will be chosen based on the names of
|
||||
`categorical_columns`.
|
||||
@ -1270,13 +1271,13 @@ def numeric_column(key,
|
||||
```python
|
||||
price = numeric_column('price')
|
||||
columns = [price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
|
||||
# or
|
||||
bucketized_price = bucketized_column(price, boundaries=[...])
|
||||
columns = [bucketized_price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1291,7 +1292,7 @@ def numeric_column(key,
|
||||
default_value: A single value compatible with `dtype` or an iterable of
|
||||
values compatible with `dtype` which the column takes on during
|
||||
`tf.Example` parsing if data is missing. A default value of `None` will
|
||||
cause `tf.parse_example` to fail if an example does not contain this
|
||||
cause `tf.io.parse_example` to fail if an example does not contain this
|
||||
column. If a single value is provided, the same value will be applied as
|
||||
the default value for every item. If an iterable of values is provided,
|
||||
the shape of the `default_value` should be equal to the given `shape`.
|
||||
@ -1365,12 +1366,12 @@ def bucketized_column(source_column, boundaries):
|
||||
price = numeric_column('price')
|
||||
bucketized_price = bucketized_column(price, boundaries=[...])
|
||||
columns = [bucketized_price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
|
||||
# or
|
||||
columns = [bucketized_price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1384,7 +1385,7 @@ def bucketized_column(source_column, boundaries):
|
||||
# 'keywords' is a string feature.
|
||||
price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)
|
||||
columns = [price_x_keywords, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1440,13 +1441,13 @@ def categorical_column_with_hash_bucket(key,
|
||||
```python
|
||||
keywords = categorical_column_with_hash_bucket("keywords", 10K)
|
||||
columns = [keywords, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
|
||||
# or
|
||||
keywords_embedded = embedding_column(keywords, 16)
|
||||
columns = [keywords_embedded, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1508,7 +1509,7 @@ def categorical_column_with_vocabulary_file(key,
|
||||
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
|
||||
num_oov_buckets=5)
|
||||
columns = [states, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1523,7 +1524,7 @@ def categorical_column_with_vocabulary_file(key,
|
||||
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
|
||||
default_value=0)
|
||||
columns = [states, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1531,7 +1532,7 @@ def categorical_column_with_vocabulary_file(key,
|
||||
|
||||
```python
|
||||
columns = [embedding_column(states, 3),...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1599,7 +1600,7 @@ def categorical_column_with_vocabulary_file_v2(key,
|
||||
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
|
||||
num_oov_buckets=5)
|
||||
columns = [states, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1614,7 +1615,7 @@ def categorical_column_with_vocabulary_file_v2(key,
|
||||
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
|
||||
default_value=0)
|
||||
columns = [states, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1622,7 +1623,7 @@ def categorical_column_with_vocabulary_file_v2(key,
|
||||
|
||||
```python
|
||||
columns = [embedding_column(states, 3),...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1717,7 +1718,7 @@ def categorical_column_with_vocabulary_list(key,
|
||||
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
|
||||
num_oov_buckets=2)
|
||||
columns = [colors, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1731,7 +1732,7 @@ def categorical_column_with_vocabulary_list(key,
|
||||
colors = categorical_column_with_vocabulary_list(
|
||||
key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
|
||||
columns = [colors, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1739,7 +1740,7 @@ def categorical_column_with_vocabulary_list(key,
|
||||
|
||||
```python
|
||||
columns = [embedding_column(colors, 3),...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1833,7 +1834,7 @@ def categorical_column_with_identity(key, num_buckets, default_value=None):
|
||||
video_id = categorical_column_with_identity(
|
||||
key='video_id', num_buckets=1000000, default_value=0)
|
||||
columns = [video_id, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -1841,7 +1842,7 @@ def categorical_column_with_identity(key, num_buckets, default_value=None):
|
||||
|
||||
```python
|
||||
columns = [embedding_column(video_id, 9),...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
```
|
||||
|
||||
@ -1891,7 +1892,7 @@ def indicator_column(categorical_column):
|
||||
name = indicator_column(categorical_column_with_vocabulary_list(
|
||||
'name', ['bob', 'george', 'wanda'])
|
||||
columns = [name, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
dense_tensor = input_layer(features, columns)
|
||||
|
||||
dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
|
||||
@ -1955,7 +1956,7 @@ def weighted_categorical_column(categorical_column,
|
||||
weighted_column = weighted_categorical_column(
|
||||
categorical_column=categorical_column, weight_feature_key='frequencies')
|
||||
columns = [weighted_column, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction, _, _ = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -2032,7 +2033,7 @@ def crossed_column(keys, hash_bucket_size, hash_key=None):
|
||||
```python
|
||||
keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
|
||||
columns = [keywords_x_doc_terms, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -2043,7 +2044,7 @@ def crossed_column(keys, hash_bucket_size, hash_key=None):
|
||||
'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
|
||||
keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
|
||||
columns = [keywords_x_doc_terms, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -2058,7 +2059,7 @@ def crossed_column(keys, hash_bucket_size, hash_key=None):
|
||||
bucketized_price = bucketized_column(price, boundaries=[...])
|
||||
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
|
||||
columns = [vertical_id_x_price, ...]
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
|
||||
linear_prediction = linear_model(features, columns)
|
||||
```
|
||||
|
||||
@ -2167,17 +2168,17 @@ class FeatureColumn(object):
|
||||
def parse_example_spec(self):
|
||||
"""Returns a `tf.Example` parsing spec as dict.
|
||||
|
||||
It is used for get_parsing_spec for `tf.parse_example`. Returned spec is a
|
||||
dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
|
||||
supported objects. Please check documentation of `tf.parse_example` for all
|
||||
supported spec objects.
|
||||
It is used for get_parsing_spec for `tf.io.parse_example`. Returned spec is
|
||||
a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
|
||||
supported objects. Please check documentation of `tf.io.parse_example` for
|
||||
all supported spec objects.
|
||||
|
||||
Let's say a Feature column depends on raw feature ('raw') and another
|
||||
`FeatureColumn` (input_fc). One possible implementation of
|
||||
parse_example_spec is as follows:
|
||||
|
||||
```python
|
||||
spec = {'raw': tf.FixedLenFeature(...)}
|
||||
spec = {'raw': tf.io.FixedLenFeature(...)}
|
||||
spec.update(input_fc.parse_example_spec)
|
||||
return spec
|
||||
```
|
||||
@ -4391,7 +4392,7 @@ def _verify_static_batch_size_equality(tensors, columns):
|
||||
Raises:
|
||||
ValueError: in case of mismatched batch sizes.
|
||||
"""
|
||||
# bath_size is a tf.Dimension object.
|
||||
# bath_size is a tf.compat.v1.Dimension object.
|
||||
expected_batch_size = None
|
||||
for i in range(0, len(tensors)):
|
||||
batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(
|
||||
|
@ -66,7 +66,8 @@ class SequenceFeatures(fc._BaseFeaturesLayer):
|
||||
columns = [rating, watches_embedding]
|
||||
|
||||
sequence_input_layer = SequenceFeatures(columns)
|
||||
features = tf.parse_example(..., features=make_parse_example_spec(columns))
|
||||
features = tf.io.parse_example(...,
|
||||
features=make_parse_example_spec(columns))
|
||||
sequence_input, sequence_length = sequence_input_layer(features)
|
||||
sequence_length_mask = tf.sequence_mask(sequence_length)
|
||||
|
||||
@ -214,7 +215,7 @@ def sequence_categorical_column_with_identity(

watches_embedding = embedding_column(watches, dimension=10)
columns = [watches_embedding]

features = tf.parse_example(..., features=make_parse_example_spec(columns))
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)

@ -263,7 +264,7 @@ def sequence_categorical_column_with_hash_bucket(

tokens_embedding = embedding_column(tokens, dimension=10)
columns = [tokens_embedding]

features = tf.parse_example(..., features=make_parse_example_spec(columns))
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)

@ -311,7 +312,7 @@ def sequence_categorical_column_with_vocabulary_file(

states_embedding = embedding_column(states, dimension=10)
columns = [states_embedding]

features = tf.parse_example(..., features=make_parse_example_spec(columns))
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)

@ -375,7 +376,7 @@ def sequence_categorical_column_with_vocabulary_list(

colors_embedding = embedding_column(colors, dimension=3)
columns = [colors_embedding]

features = tf.parse_example(..., features=make_parse_example_spec(columns))
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)

@ -434,7 +435,7 @@ def sequence_numeric_column(

temperature = sequence_numeric_column('temperature')
columns = [temperature]

features = tf.parse_example(..., features=make_parse_example_spec(columns))
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
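All of the sequence-column examples above share one skeleton. A hedged end-to-end sketch follows; the exact export paths for these symbols moved between `tf.contrib.feature_column`, `tf.feature_column`, and `tf.keras.experimental` across releases, so the paths below are assumptions, and 'watches' is a made-up feature.

```python
import tensorflow as tf

watches = tf.feature_column.sequence_categorical_column_with_identity(
    'watches', num_buckets=1000)
watches_embedding = tf.feature_column.embedding_column(watches, dimension=10)
columns = [watches_embedding]

serialized = tf.compat.v1.placeholder(tf.string, shape=[None])
features = tf.io.parse_example(
    serialized, features=tf.feature_column.make_parse_example_spec(columns))

sequence_input_layer = tf.keras.experimental.SequenceFeatures(columns)
sequence_input, sequence_length = sequence_input_layer(features)
# Mask out padded timesteps downstream, e.g. in an RNN.
sequence_length_mask = tf.sequence_mask(sequence_length)
```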
@ -110,7 +110,8 @@ class Controller(object):

"""At this time, this method evaluates ONLY ONE placement.

Args:
sess: a tf.Session() object used to retrieve cached assignment info.
sess: a tf.compat.v1.Session() object used to retrieve cached assignment
info.
*args: "".
**kwargs: "".

@ -1037,7 +1037,7 @@ class HierarchicalController(Controller):

Args:
loss: scalar tf tensor
tf_variables: list of training variables, typically
tf.trainable_variables()
tf.compat.v1.trainable_variables()
global_step: global_step
grad_bound: max gradient norm
lr_init: initial learning rate
@ -64,8 +64,8 @@ def keras_style_scope():

def __init__(self, name):
super(RNNModel, self).__init__(name=name)
self.rnn = tf.nn.rnn_cell.MultiRNNCell(
[tf.nn.rnn_cell.LSTMCell(64) for _ in range(2)])
self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
[tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)])

def call(self, input, state):
return self.rnn(input, state)
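Assembled into the full class the docstring implies (the class line and scope usage are not part of this hunk), the example reads roughly as below; the `keras_style_scope` export path is an assumption.

```python
import tensorflow as tf

class RNNModel(tf.keras.Model):

  def __init__(self, name):
    super(RNNModel, self).__init__(name=name)
    self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
        [tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)])

  def call(self, input, state):
    return self.rnn(input, state)

# Under Keras-style semantics each instance owns its variables outright
# (assumed export path: tf.compat.v1.layers.experimental.keras_style_scope).
with tf.compat.v1.layers.experimental.keras_style_scope():
  model_1 = RNNModel('model_1')
  model_2 = RNNModel('model_2')
```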
@ -339,10 +339,10 @@ class Layer(base_layer.Layer):

provided, when the requested variable is created it will be split
into multiple partitions according to `partitioner`. In this case,
an instance of `PartitionedVariable` is returned. Available
partitioners include `tf.fixed_size_partitioner` and
`tf.variable_axis_size_partitioner`. For more details, see the
documentation of `tf.get_variable` and the "Variable Partitioners
and Sharding" section of the API guide.
partitioners include `tf.compat.v1.fixed_size_partitioner` and
`tf.compat.v1.variable_axis_size_partitioner`. For more details, see
the documentation of `tf.compat.v1.get_variable` and the "Variable
Partitioners and Sharding" section of the API guide.
**kwargs: Additional keyword arguments.

Returns:
@ -47,7 +47,7 @@ class Dense(keras_layers.Dense, base.Layer):

use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.

@ -145,7 +145,7 @@ def dense(

use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.

@ -207,7 +207,7 @@ class Dropout(keras_layers.Dropout, base.Layer):

to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`.
`tf.compat.v1.set_random_seed`.
for behavior.
name: The name of the layer (string).
"""

@ -255,7 +255,7 @@ def dropout(inputs,

to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`
`tf.compat.v1.set_random_seed`
for behavior.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
@ -286,11 +286,11 @@ class Flatten(keras_layers.Flatten, base.Layer):

Examples:

```
x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, 16)`

x = tf.placeholder(shape=(None, 3, None), dtype='float32')
x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, None)`
```

@ -320,11 +320,11 @@ def flatten(inputs, name=None, data_format='channels_last'):

Examples:

```
x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, 16)`

x = tf.placeholder(shape=(None, 3, None), dtype='float32')
x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, None)`
```
@ -106,8 +106,8 @@ class BatchNormalization(keras_layers.BatchNormalization, base.Layer):

normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
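Although the hunk cuts off mid-sentence, the `adjustment` hook it documents can be written out as a small sketch; `adjustment` is the constructor argument this Args entry describes, and the scale/shift ranges mirror the example in the text.

```python
import tensorflow as tf

# Randomly scale each feature by up to +/-7% and shift it by up to +/-0.1
# (applied to the normalized values, before gamma and beta).
adjustment = lambda shape: (
    tf.random.uniform(shape[-1:], 0.93, 1.07),
    tf.random.uniform(shape[-1:], -0.1, 0.1))

bn = tf.compat.v1.layers.BatchNormalization(axis=-1, adjustment=adjustment)
```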
@ -214,11 +214,11 @@ def batch_normalization(inputs,

example:

```python
x_norm = tf.layers.batch_normalization(x, training=training)
x_norm = tf.compat.v1.layers.batch_normalization(x, training=training)

# ...

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
update_ops = tf.compat.v1.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = optimizer.minimize(loss)
train_op = tf.group([train_op, update_ops])
```
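A slightly fuller, equivalent form of that example uses control dependencies instead of `tf.group`; this is a graph-mode sketch where `x`, `labels`, and `is_training` stand in for whatever the surrounding model defines.

```python
x_norm = tf.compat.v1.layers.batch_normalization(x, training=is_training)
logits = tf.compat.v1.layers.dense(x_norm, units=10)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))

# The moving-mean/variance updates live in the UPDATE_OPS collection and
# must run alongside the train op, or inference statistics go stale.
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
  train_op = tf.compat.v1.train.AdamOptimizer().minimize(loss)
```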
@ -286,8 +286,8 @@ def batch_normalization(inputs,

normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
tf.random.uniform(shape[-1:], 0.93, 1.07),
tf.random.uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
@ -41,7 +41,7 @@ class Module(tracking.AutoTrackable):

... def __init__(self, in_features, output_features, name=None):
... super(Dense, self).__init__(name=name)
... self.w = tf.Variable(
... tf.random_normal([in_features, output_features]), name='w')
... tf.random.normal([in_features, output_features]), name='w')
... self.b = tf.Variable(tf.zeros([output_features]), name='b')
...
... def __call__(self, x):
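Completed, the module reads as below. The `__call__` body is the customary matmul-plus-bias continuation rather than part of this hunk, and the snippet's `input_features`/`in_features` mismatch is normalized to `in_features`.

```python
import tensorflow as tf

class Dense(tf.Module):

  def __init__(self, in_features, output_features, name=None):
    super(Dense, self).__init__(name=name)
    self.w = tf.Variable(
        tf.random.normal([in_features, output_features]), name='w')
    self.b = tf.Variable(tf.zeros([output_features]), name='b')

  def __call__(self, x):
    # Affine transform followed by a nonlinearity (assumed continuation).
    y = tf.matmul(x, self.w) + self.b
    return tf.nn.relu(y)
```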
@ -216,7 +216,7 @@ class Benchmark(six.with_metaclass(_BenchmarkRegistrar, object)):

@tf_export("test.benchmark_config")
def benchmark_config():
"""Returns a tf.ConfigProto for disabling the dependency optimizer.
"""Returns a tf.compat.v1.ConfigProto for disabling the dependency optimizer.

Returns:
A TensorFlow ConfigProto object.

@ -17,8 +17,8 @@

See the [Testing](https://tensorflow.org/api_guides/python/test) guide.

Note: `tf.test.mock` is an alias to the python `mock` or `unittest.mock`
depending on the python version.
Note: `tf.compat.v1.test.mock` is an alias to the python `mock` or
`unittest.mock` depending on the python version.
"""

from __future__ import absolute_import
@ -135,9 +135,9 @@ class Profiler(object):

for i in xrange(total_steps):
if i % 10000 == 0:
run_meta = tf.RunMetadata()
run_meta = tf.compat.v1.RunMetadata()
_ = sess.run(...,
options=tf.RunOptions(
options=tf.compat.v1.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler.add_step(i, run_meta)
@ -37,7 +37,7 @@ class ProfileOptionBuilder(object):

tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())

# Or, build your own options:
opts = (tf.profiler.ProfileOptionBuilder()
opts = (tf.compat.v1.profiler.ProfileOptionBuilder()
.with_max_depth(10)
.with_min_micros(1000)
.select(['accelerator_micros'])

@ -45,13 +45,13 @@ class ProfileOptionBuilder(object):

.build()

# Or customize the pre-built options:
opts = (tf.profiler.ProfileOptionBuilder(
opts = (tf.compat.v1.profiler.ProfileOptionBuilder(
tf.profiler.ProfileOptionBuilder.time_and_memory())
.with_displaying_options(show_name_regexes=['.*rnn.*'])
.build())

# Finally, profiling with the options:
_ = tf.profiler.profile(tf.get_default_graph(),
_ = tf.compat.v1.profiler.profile(tf.compat.v1.get_default_graph(),
run_meta=run_meta,
cmd='scope',
options=opts)
@ -21,10 +21,10 @@ The following needs to be set for profiler to work:

* run_metadata object should be passed in to session.run call

Sample usage:
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
options = tf.compat.v1.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.compat.v1.RunMetadata()

with tf.Session() as sess:
with tf.compat.v1.Session() as sess:
...
sess.run(computation, run_metadata=run_metadata, options=options)
pprof_profiler.profile(sess.graph, run_metadata, output_dir)
@ -82,8 +82,8 @@ def _get_logged_ops(graph, run_meta=None, add_trace=True,

graph: tf.Graph.
run_meta: RunMetadata proto used to complete shape information.
add_trace: Whether to add op trace information.
add_trainable_var: Whether to assign tf.trainable_variables() op type
'_trainable_variables'.
add_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op
type '_trainable_variables'.
Returns:
logged_ops: dict mapping from op_name to OpLogEntry.
string_to_id: dict mapping from string to id.

@ -151,8 +151,8 @@ def merge_default_with_oplog(graph, op_log=None, run_meta=None,

op_log: OpLogProto proto.
run_meta: RunMetadata proto used to complete shape information.
add_trace: Whether to add op trace information.
add_trainable_var: Whether to assign tf.trainable_variables() op type
'_trainable_variables'.
add_trainable_var: Whether to assign tf.compat.v1.trainable_variables() op
type '_trainable_variables'.
Returns:
tmp_op_log: Merged OpLogProto proto.
"""

@ -192,8 +192,8 @@ def merge_default_with_oplog(graph, op_log=None, run_meta=None,

def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
"""Log provided 'op_log', and add additional model information below.

The API also assigns ops in tf.trainable_variables() an op type called
'_trainable_variables'.
The API also assigns ops in tf.compat.v1.trainable_variables() an op type
called '_trainable_variables'.
The API also logs 'flops' statistics for ops with op.RegisterStatistics()
defined. flops calculation depends on Tensor shapes defined in 'graph',
which might not be complete. 'run_meta', if provided, completes the shape
@ -21,7 +21,7 @@ images, etc.

This module contains methods that allow plugin assets to be specified at graph
construction time. Plugin authors define a PluginAsset which is treated as a
singleton on a per-graph basis. The PluginAsset has an assets method which
returns a dictionary of asset contents. The tf.summary.FileWriter
returns a dictionary of asset contents. The tf.compat.v1.summary.FileWriter
(or any other Summary writer) will serialize these assets in such a way that
TensorBoard can retrieve them.
"""

@ -123,9 +123,9 @@ class PluginAsset(object):

- It is constructed when get_plugin_asset is called on the class for
the first time.
- It is configured by code that follows the calls to get_plugin_asset
- When the containing graph is serialized by the tf.summary.FileWriter, the
writer calls assets and the PluginAsset instance provides its contents to be
written to disk.
- When the containing graph is serialized by the
tf.compat.v1.summary.FileWriter, the writer calls assets and the
PluginAsset instance provides its contents to be written to disk.
"""

plugin_name = None

@ -137,7 +137,7 @@ class PluginAsset(object):

The assets method should return a dictionary structured as
{asset_name: asset_contents}. asset_contents is a string.

This method will be called by the tf.summary.FileWriter when it is time to
write the assets out to disk.
This method will be called by the tf.compat.v1.summary.FileWriter when it
is time to write the assets out to disk.
"""
raise NotImplementedError()
@ -35,7 +35,7 @@ def summary_iterator(path):

Example: Print the contents of an events file.

```python
for e in tf.train.summary_iterator(path to events file):
for e in tf.compat.v1.train.summary_iterator(path to events file):
print(e)
```

@ -45,8 +45,8 @@ def summary_iterator(path):

# This example supposes that the events file contains summaries with a
# summary value tag 'loss'. These could have been added by calling
# `add_summary()`, passing the output of a scalar summary op created with
# with: `tf.summary.scalar('loss', loss_tensor)`.
for e in tf.train.summary_iterator(path to events file):
# with: `tf.compat.v1.summary.scalar('loss', loss_tensor)`.
for e in tf.compat.v1.train.summary_iterator(path to events file):
for v in e.summary.value:
if v.tag == 'loss':
print(v.simple_value)
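The two examples compose into a round trip: write one scalar summary, then iterate the resulting events file. A graph-mode sketch, with a made-up log directory:

```python
import glob
import tensorflow as tf

logdir = '/tmp/summary_demo'  # hypothetical directory
summary_op = tf.compat.v1.summary.scalar('loss', tf.constant(0.25))

with tf.compat.v1.Session() as sess:
  writer = tf.compat.v1.summary.FileWriter(logdir, sess.graph)
  writer.add_summary(sess.run(summary_op), global_step=0)
  writer.close()

# Events files are named events.out.tfevents.*; read the scalar back.
for path in glob.glob(logdir + '/events.out.tfevents.*'):
  for e in tf.compat.v1.train.summary_iterator(path):
    for v in e.summary.value:
      if v.tag == 'loss':
        print(v.simple_value)
```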
@ -61,8 +61,9 @@ class EventFileWriterV2(object):

no effect. See `tf.contrib.summary.create_file_writer` for details.

Args:
session: A `tf.Session`. Session that will hold shared writer resource.
The writer ops will be added to session.graph during this init call.
session: A `tf.compat.v1.Session`. Session that will hold shared writer
resource. The writer ops will be added to session.graph during this
init call.
logdir: A string. Directory where event file will be written.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
@ -65,9 +65,9 @@ class SummaryToEventTransformer(object):

```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```

@ -107,7 +107,7 @@ class SummaryToEventTransformer(object):

You can pass the result of evaluating any summary op, using
`tf.Session.run` or
`tf.Tensor.eval`, to this
function. Alternatively, you can pass a `tf.Summary` protocol
function. Alternatively, you can pass a `tf.compat.v1.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.

@ -289,10 +289,10 @@ class FileWriter(SummaryToEventTransformer):

to add data to the file directly from the training loop, without slowing down
training.

When constructed with a `tf.Session` parameter, a `FileWriter` instead forms
a compatibility layer over new graph-based summaries (`tf.contrib.summary`)
to facilitate the use of new summary writing with pre-existing code that
expects a `FileWriter` instance.
When constructed with a `tf.compat.v1.Session` parameter, a `FileWriter`
instead forms a compatibility layer over new graph-based summaries
(`tf.contrib.summary`) to facilitate the use of new summary writing with
pre-existing code that expects a `FileWriter` instance.
"""

def __init__(self,

@ -320,9 +320,9 @@ class FileWriter(SummaryToEventTransformer):

```python
...create a graph...
# Launch the graph in a session.
sess = tf.Session()
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, sess.graph)
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```

The `session` argument to the constructor makes the returned `FileWriter` a

@ -345,7 +345,7 @@ class FileWriter(SummaryToEventTransformer):

graph_def: DEPRECATED: Use the `graph` argument instead.
filename_suffix: A string. Every event file's name is suffixed with
`suffix`.
session: A `tf.Session` object. See details above.
session: A `tf.compat.v1.Session` object. See details above.

Raises:
RuntimeError: If called with eager execution enabled.
@ -33,7 +33,8 @@ def _get_custom_getter():

```python
network = ConvNetBuilder(...)
with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
with tf.compat.v1.variable_scope('cg',
custom_getter=network.get_custom_getter()):
network.conv(...)
# Call more methods of network here
```
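For flavor, a minimal custom getter of the kind such a builder returns; this sketch only records every variable it creates (a real builder's getter, e.g. one casting to float16, would do more).

```python
import tensorflow as tf

created = []

def recording_getter(getter, name, *args, **kwargs):
  # Delegate to the default getter, then remember the variable.
  var = getter(name, *args, **kwargs)
  created.append(var)
  return var

with tf.compat.v1.variable_scope('cg', custom_getter=recording_getter):
  w = tf.compat.v1.get_variable('w', shape=[3, 3], dtype=tf.float32)

print([v.name for v in created])  # ['cg/w:0']
```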
@ -61,8 +61,8 @@ def embedding_column(categorical_column,

`tf.feature_column.embedding_column`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.

Returns:
A _TPUEmbeddingColumn.

@ -62,7 +62,7 @@ class WorkerHeartbeatManager(object):

(Prefer using `WorkerHeartbeatManager.from_devices` when possible.)

Args:
session: `tf.Session`, session to use for heartbeat operations.
session: `tf.compat.v1.Session`, session to use for heartbeat operations.
devices: `list[string]` Set of devices to connect to.
heartbeat_ops: `list[tf.Operation]` Heartbeat operations.
request_placeholder: `tf.Placeholder[String]` Placeholder used to specify
@ -542,12 +542,13 @@ def replicate(computation,

name: (Deprecated) Does nothing.
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g. tf.Dimension(None) in a
tf.TensorShape or -1 in a tensor-like object) will be padded to the
maximum size of that dimension over all replicas. Note that if the input
dimension is already static, we won't do padding on it and we require the
maximum_shapes to have the same value or None on that dimension. The
structure of `maximum_shapes` needs to be the same as `inputs[0]`.
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. Note that if the input dimension is already static, we won't do
padding on it and we require the maximum_shapes to have the same value or
None on that dimension. The structure of `maximum_shapes` needs to be the
same as `inputs[0]`.
Returns:
A list of outputs, indexed by `[replica_num]` each output can be a nested
structure same as what computation() returns with a few exceptions.

@ -699,12 +700,13 @@ def split_compile_and_replicate(computation,

placed on GPU if one is available, and on CPU if not).
maximum_shapes: A nested structure of tf.TensorShape representing the shape
to which the respective component of each input element in each replica
should be padded. Any unknown dimensions (e.g. tf.Dimension(None) in a
tf.TensorShape or -1 in a tensor-like object) will be padded to the
maximum size of that dimension over all replicas. Note that if the input
dimension is already static, we won't do padding on it and we require the
maximum_shapes to have the same value or None on that dimension. The
structure of `maximum_shapes` needs to be the same as `inputs[0]`.
should be padded. Any unknown dimensions (e.g.
tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like
object) will be padded to the maximum size of that dimension over all
replicas. Note that if the input dimension is already static, we won't do
padding on it and we require the maximum_shapes to have the same value or
None on that dimension. The structure of `maximum_shapes` needs to be the
same as `inputs[0]`.

Returns:
A list of lists with the first list corresponding to the compile op and the
@ -1491,13 +1493,14 @@ def validate_inference_rewrite_for_variables(graph):

The rewrite_for_inference() method is supposed to append GuaranteeConstOps
after ReadVariableOps, but this mechanism works only if you are using
tf.get_variable() to create and access variables in your tpu computation.
This validation method can be called immediately after calling
tf.compat.v1.get_variable() to create and access variables in your tpu
computation. This validation method can be called immediately after calling
tpu.rewrite_for_inference() to check whether GuaranteeConstOps were added
to the graph.

Typical usages:
tpu.validate_inference_rewrite_for_variables(tf.get_default_graph())
tpu.validate_inference_rewrite_for_variables(
tf.compat.v1.get_default_graph())

tpu.validate_inference_rewrite_for_variables(sess.graph)

@ -1524,11 +1527,11 @@ def rewrite_for_inference(computation,

Other than 'rewriting' the computation to run on a TPU, if using variables
in your computation, it moves the ReadVariableOps outside the TPU
computation, and adds GuaranteeConst ops just after the ReadVariableOps.
This mechanism works only if you are using tf.get_variable() to create and
access variables in your tpu computation. You can validate whether this
worked, by calling validate_inference_rewrite_for_variables() method
immediately after this method to check whether GuaranteeConstOps were
added to the graph.
This mechanism works only if you are using tf.compat.v1.get_variable() to
create and access variables in your tpu computation. You can validate
whether this worked by calling the validate_inference_rewrite_for_variables()
method immediately after this method to check whether GuaranteeConstOps
were added to the graph.

Args:
computation: A Python function that builds a computation to apply to the
@ -59,8 +59,8 @@ class TableConfig(

dimension: The embedding dimension.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and standard
deviation `1/sqrt(dimension)`.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn', 'sum' and None are
supported, with 'mean' the default. 'sqrtn' often achieves good
@ -2106,7 +2106,7 @@ class TPUEstimator(estimator_lib.Estimator):

def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.precision(
'accuracy': tf.compat.v1.metrics.precision(
labels=labels, predictions=predictions),
}
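For context, such a `metric_fn` is handed to the estimator spec as an `eval_metrics` pair; a hedged sketch against the contrib-era API, where `mode`, `loss`, `labels`, and `logits` come from the surrounding `model_fn`.

```python
def metric_fn(labels, logits):
  predictions = tf.argmax(logits, 1)
  return {
      'accuracy': tf.compat.v1.metrics.precision(
          labels=labels, predictions=predictions),
  }

return tf.contrib.tpu.TPUEstimatorSpec(
    mode=mode,
    loss=loss,
    eval_metrics=(metric_fn, [labels, logits]))
```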
@ -2176,7 +2176,7 @@ class TPUEstimator(estimator_lib.Estimator):

def predict_input_fn(params):
batch_size = params['batch_size']

images = tf.random_uniform(
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)

dataset = tf.data.Dataset.from_tensor_slices(images)

@ -2211,8 +2211,8 @@ class TPUEstimator(estimator_lib.Estimator):

Exporting
=========

`export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`,
and another with `tag_constants.SERVING` and `tag_constants.TPU`.
`export_savedmodel` exports 2 metagraphs, one with `saved_model.SERVING`,
and another with `saved_model.SERVING` and `saved_model.TPU`.
At serving time, these tags are used to select metagraph to load.

Before running the graph on TPU, TPU system needs to be initialized. If
@ -28,7 +28,7 @@ def extract_example_parser_configuration(parse_example_op, sess):

Args:
parse_example_op: A ParseExample `Operation`
sess: A tf.Session needed to obtain some configuration values.
sess: A tf.compat.v1.Session needed to obtain some configuration values.
Returns:
An ExampleParserConfig proto.
@ -141,7 +141,7 @@ def _add_should_use_warning(x, fatal_error=False):

Args:
x: Python object.
fatal_error: Python bool. If `True`, tf.logging.fatal is raised
fatal_error: Python bool. If `True`, tf.compat.v1.logging.fatal is raised
if the returned value is never used.

Returns:

@ -169,7 +169,7 @@ def _add_should_use_warning(x, fatal_error=False):

def should_use_result(fn):
"""Function wrapper that ensures the function's output is used.

If the output is not used, a `tf.logging.error` is logged.
If the output is not used, a `tf.compat.v1.logging.error` is logged.

An output is marked as used if any of its attributes are read, modified, or
updated. Examples when the output is a `Tensor` include:

@ -203,7 +203,7 @@ def should_use_result(fn):

def must_use_result_or_fatal(fn):
"""Function wrapper that ensures the function's output is used.

If the output is not used, a `tf.logging.fatal` error is raised.
If the output is not used, a `tf.compat.v1.logging.fatal` error is raised.

An output is marked as used if any of its attributes are read, modified, or
updated. Examples when the output is a `Tensor` include:
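Illustratively, "used" here means touching the returned object at all. A sketch against the internal module these hunks edit (the import path is internal and shown only for illustration):

```python
import tensorflow as tf
from tensorflow.python.util import tf_should_use

@tf_should_use.should_use_result
def build_op():
  return tf.constant(1.0)

t = build_op()
# Either of these marks the result as used and avoids the logged error:
_ = t + 1       # consuming the value in another op
name = t.name   # reading an attribute
```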