Remove contrib/imperative.

See contrib/eager for an actively developed equivalent.

PiperOrigin-RevId: 168921728
Vincent Vanhoucke 2017-09-15 19:26:49 -07:00 committed by TensorFlower Gardener
parent 6697af4409
commit b5e080de92
10 changed files with 0 additions and 1278 deletions

CODEOWNERS

@@ -22,7 +22,6 @@
#tensorflow/contrib/graph_editor/* @purpledog
# NEED OWNER: tensorflow/contrib/grid_rnn/*
#tensorflow/contrib/hvx/* @satok16
#tensorflow/contrib/imperative/* @keveman
#tensorflow/contrib/integrate/* @shoyer
#tensorflow/contrib/kernel_methods/* @petrosmol
#tensorflow/contrib/ios_examples/* @petewarden

tensorflow/BUILD

@@ -295,7 +295,6 @@ filegroup(
"//tensorflow/contrib/hooks:all_files",
"//tensorflow/contrib/hvx/hvx_ops_support_checker:all_files",
"//tensorflow/contrib/image:all_files",
"//tensorflow/contrib/imperative:all_files",
"//tensorflow/contrib/input_pipeline:all_files",
"//tensorflow/contrib/input_pipeline/kernels:all_files",
"//tensorflow/contrib/integrate:all_files",

tensorflow/contrib/BUILD

@@ -36,7 +36,6 @@ py_library(
"//tensorflow/contrib/image:distort_image_py",
"//tensorflow/contrib/image:image_py",
"//tensorflow/contrib/image:single_image_random_dot_stereograms_py",
"//tensorflow/contrib/imperative",
"//tensorflow/contrib/input_pipeline:input_pipeline_py",
"//tensorflow/contrib/integrate:integrate_py",
"//tensorflow/contrib/keras",

tensorflow/contrib/imperative/BUILD

@@ -1,92 +0,0 @@
# Description:
#   Imperative mode for TensorFlow.

licenses(["notice"])  # Apache 2.0

exports_files(["LICENSE"])

package(default_visibility = ["//tensorflow:__subpackages__"])

load("//tensorflow:tensorflow.bzl", "cuda_py_test")

py_library(
    name = "imperative",
    srcs = [
        "__init__.py",
        "imperative_graph.py",
        "imperative_mode.py",
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":imperative_graph",
        ":imperative_mode",
        "//tensorflow/core:protos_all_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:client",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:dtypes",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_ops",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:tensor_shape",
        "//tensorflow/python:util",
        "//tensorflow/python:variables",
    ],
)

py_library(
    name = "imperative_graph",
    srcs = ["imperative_graph.py"],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/core:protos_all_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:dtypes",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework_ops",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:tensor_shape",
        "//tensorflow/python:util",
        "//tensorflow/python:variables",
    ],
)

py_library(
    name = "imperative_mode",
    srcs = ["imperative_mode.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":imperative_graph",
        "//tensorflow/python:client",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_ops",
    ],
)

cuda_py_test(
    name = "imperative_test",
    size = "small",
    srcs = ["imperative_test.py"],
    additional_deps = [
        ":imperative_mode",
        "//third_party/py/numpy",
        "//tensorflow/python:client_testlib",
        "//tensorflow/python:training",
        "//tensorflow/python:framework_test_lib",
    ],
)

filegroup(
    name = "all_files",
    srcs = glob(
        ["**/*"],
        exclude = [
            "**/METADATA",
            "**/OWNERS",
        ],
    ),
    visibility = ["//tensorflow:__subpackages__"],
)

tensorflow/contrib/imperative/README.md

@@ -1,155 +0,0 @@
## Imperative programming in TensorFlow
In the standard TensorFlow library, the computation is specified statically as
a computation graph, separately from the graph's execution. This model of
programming is referred to as *lazy*, *deferred*, or *asynchronous*. This
library brings imperative-style programming (à la
[NumPy](http://www.numpy.org)) to TensorFlow. Using this library, you can:

* Write code in an imperative style: the results of a computation are
  available right after the line that produced them executes.
* Use TensorFlow operations on tensors, and get all the benefits of GPU
  acceleration.
* Use any Python control-flow statement, such as `while` and `if`, when
  specifying the computation.
* Perform automatic differentiation on your code with the standard
  [`tf.gradients`](https://www.tensorflow.org/api_docs/python/train/gradient_computation#gradients)
  function (see the sketch after this list).
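
A minimal sketch combining the last two points: Python control flow over
tensor values, followed by `tf.gradients` through the resulting unrolled
computation (assumes the nightly package described below is installed):

```python
import tensorflow.contrib.imperative as tf

x = tf.constant(2.0)
y = x
while y < 10.0:  # ordinary Python control flow over tensor values
  y = y * x      # unrolls to x**4 for x == 2.0
dy = tf.gradients(y, x)  # differentiates the unrolled computation
print(y, dy[0])  # 16.0 and 32.0 (= 4 * x**3)
```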
### Getting started

This library is a thin wrapper over the standard TensorFlow Python library.
Its source code is available
[here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/imperative).
On Linux, you can get started by installing the nightly PIP package linked
from [the main page](https://github.com/tensorflow/tensorflow). Please consult
[the installation document](https://github.com/tensorflow/tensorflow#installation)
for other platforms and for PIP packages with GPU support.
### Write your first imperative TensorFlow program
```shell
$ python
```
```python
>>> import tensorflow.contrib.imperative as tf
>>> x = tf.constant([[7.], [6]])
>>> y = tf.constant([[6., 7]])
>>> tf.matmul(x, y)
array([[ 42.,  49.],
       [ 36.,  42.]], dtype=float32)
```
In terms of the programmer's mental model, this code is identical to the
following NumPy code:
```python
>>> import numpy as np
>>> x = np.array([[7.], [6]])
>>> y = np.array([[6., 7]])
>>> x.dot(y)
array([[ 42.,  49.],
       [ 36.,  42.]])
```
The library is imported as `import tensorflow.contrib.imperative as tf`
(contrast this with importing standard TensorFlow, which is done as `import
tensorflow as tf`). This import makes all of standard TensorFlow available
under the `tf` symbol, but there is no need to create a session object and use
it to run and fetch tensors.
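
For contrast, here is a minimal sketch of the same computation in standard
(graph-mode) TensorFlow of this era, where a session must be created and used
to fetch the result:

```python
>>> import tensorflow as tf
>>> x = tf.constant([[7.], [6]])
>>> y = tf.constant([[6., 7]])
>>> z = tf.matmul(x, y)  # z is a symbolic tensor; nothing has run yet
>>> with tf.Session() as sess:
...   print(sess.run(z))  # only sess.run() produces a concrete value
[[ 42.  49.]
 [ 36.  42.]]
```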
### Features

The library provides the following additional features on top of standard
TensorFlow:

* Tensors are automatically fetched when used in contexts that expect their
  value.

  - Printing

    ```python
    x = tf.constant(10)
    y = tf.constant(32)
    print(x + y)

    42
    ```

  - Use in conditionals

    ```python
    x = tf.constant(30)
    if x > 4:
      print('Greater than 4')

    Greater than 4
    ```

  - Use in loops

    ```python
    x = tf.random_normal([3])
    y = x * 2
    while tf.global_norm([y]) < 1000:
      y = y * 2

    print(y)

    [ -213.2868042   -511.02456665  1026.66882324]
    ```
* Variables are automatically initialized; there is no need to run the
  [`tf.global_variables_initializer()`](https://www.tensorflow.org/api_docs/python/tf/global_variables_initializer)
  operation.

  ```python
  import numpy as np

  x = tf.Variable(np.random.normal(size=[2, 2]), dtype=tf.float32)
  y = tf.constant([[1, 2.]])
  z = tf.matmul(y, x)
  print(z)

  array([[-1.231673 ,  3.14744973]], dtype=float32)
  ```

* Gradients work as expected using the standard `tf.gradients` function.

  ```python
  x = tf.Variable(np.random.rand(1, 3))
  y = tf.exp(x)
  dy = tf.gradients(y, x)
  # dy/dx should be equal to y (= exp(x))
  print(y, dy)

  (array([[ 1.79997761,  2.00581881,  2.37302414]]), [array([[ 1.79997761,  2.00581881,  2.37302414]])])
  ```
### Caveats

This library is implemented on top of standard TensorFlow. It still constructs
a graph in the background and defers op execution. However, when an op
executes for the first time, its results are cached, and subsequent fetches
return the cached value; this is what provides the imperative semantics.
Because of this implementation choice, the library comes with the following
caveats:
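
Here is a minimal sketch of what this caching means in practice (the random
value shown is illustrative):

```python
x = tf.random_normal([])
print(x)  # e.g. -0.342: runs the op once and caches the result
print(x)  # -0.342 again: the cached value is returned, not a new sample
y = x + 1.0
print(y)  # 0.658: computed from the cached value of x
```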
* **Use inside Python loops:** A graph is constructed and kept around in the
  background, both for executing ops with the standard TensorFlow runtime and
  for enabling automatic differentiation via `tf.gradients`. This means the
  graph keeps growing when TensorFlow functions are called inside a Python
  loop. This library provides a `tf.new_step` method that clears the graph as
  well as the tensors that have been cached for gradient computation.
  `tf.new_step` can be used as a context manager around, say, a training loop
  to clear the graph after each training step.

  ```python
  x = tf.Variable(1.0)
  for i in range(10):
    # Create a new training step.
    with tf.new_step() as step:
      # Perform computation and variable updates.
      step.run(tf.assign_sub(x, 0.1))
      print(tf.identity(x))  # Prints 1.0 - (i + 1) * 0.1.
    # The graph within this context is cleared at this point.
  ```
* **Speed:** The redundant graph construction and the caching of tensor values
  add overhead that is not present in standard TensorFlow, where the graph is
  typically constructed once and executed many times. This library is intended
  as a vehicle for prototyping the imperative programming model in TensorFlow.
  The runtime overhead could be reduced by runtime optimizations that would
  benefit the deferred execution mode as well.

tensorflow/contrib/imperative/__init__.py

@@ -1,51 +0,0 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Imperative mode for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow import * # pylint: disable=wildcard-import
from tensorflow.contrib.imperative import imperative_mode
class _InteractiveMode(object):
"""Imperative mode suitable for interactive execution.
This module has a global _InteractiveMode object that enables
writing code as follows:
```python
import tensorflow.contrib.imperative as tf
print(tf.constant(42))
```
"""
def __init__(self, target=None):
if not target:
target = train.Server.create_local_server().target
self.target = target
self.imperative_mode = imperative_mode.ImperativeMode(self.target)
self.imperative_mode.__enter__()
def new_step(self):
return self.imperative_mode.new_step()
_default_interactive_mode = _InteractiveMode()
def new_step():
return _default_interactive_mode.new_step()

tensorflow/contrib/imperative/examples/mnist_example.py

@@ -1,124 +0,0 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST training in imperative mode TensorFlow."""
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.contrib.imperative as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
IMAGE_SIZE = 28
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE
NUM_CLASSES = 10
BATCH_SIZE = 100
NUM_EPOCHS = 2
LEARNING_RATE = 0.1
class Model(object):
"""Fully connected model for MNIST."""
def __init__(self, hidden1_units, hidden2_units):
"""Create the model parameters."""
self.params = []
# Hidden 1
with tf.name_scope('hidden1'):
self.weights1 = tf.Variable(
np.random.normal(scale=1.0 / np.sqrt(float(IMAGE_PIXELS)),
size=[IMAGE_PIXELS, hidden1_units]),
dtype=tf.float32,
name='weights')
self.biases1 = tf.Variable(
np.zeros([hidden1_units]),
dtype=tf.float32,
name='biases')
# Hidden 2
with tf.name_scope('hidden2'):
self.weights2 = tf.Variable(
np.random.normal(scale=1.0 / np.sqrt(float(hidden1_units)),
size=[hidden1_units, hidden2_units]),
dtype=tf.float32,
name='weights')
self.biases2 = tf.Variable(
np.zeros([hidden2_units]),
dtype=tf.float32,
name='biases')
# Linear
with tf.name_scope('softmax_linear'):
self.sm_w = tf.Variable(
np.random.normal(scale=1.0 / np.sqrt(float(hidden2_units)),
size=[hidden2_units, NUM_CLASSES]),
dtype=tf.float32,
name='weights')
self.sm_b = tf.Variable(
np.zeros([NUM_CLASSES]),
dtype=tf.float32,
name='biases')
self.params = [self.weights1, self.biases1,
self.weights2, self.biases2,
self.sm_w, self.sm_b]
def __call__(self, images):
"""Run the model's forward prop on `images`."""
hidden1 = tf.nn.relu(tf.matmul(images, self.weights1) + self.biases1)
hidden2 = tf.nn.relu(tf.matmul(hidden1, self.weights2) + self.biases2)
logits = tf.matmul(hidden2, self.sm_w) + self.sm_b
return logits
model = Model(128, 32)
data = read_data_sets('/tmp/mnist_train')
def get_test_accuracy():
"""Gets the model's classification accuracy on test data."""
num_examples = data.test.num_examples
test_images = np.split(data.test.images, num_examples/BATCH_SIZE)
test_labels = np.split(data.test.labels.astype(np.int32),
num_examples/BATCH_SIZE)
num_correct = 0
for _, (images, labels) in enumerate(zip(test_images, test_labels)):
with tf.new_step():
logits = model(images)
predictions = tf.argmax(tf.nn.softmax(logits), axis=1)
num_correct += np.sum(predictions.value == labels)
return float(num_correct) / float(num_examples)
num_examples = data.train.num_examples
train_images = np.split(data.train.images, num_examples/BATCH_SIZE)
train_labels = np.split(data.train.labels.astype(np.int32),
num_examples/BATCH_SIZE)
for epoch in range(NUM_EPOCHS):
for i, (images, labels) in enumerate(zip(train_images, train_labels)):
with tf.new_step() as step:
logits = model(images)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
gradients = tf.gradients(loss, model.params)
step.run([v.assign_sub(LEARNING_RATE * g)
for g, v in zip(gradients, model.params)])
if i % 10 == 0:
print('Loss after {} steps = {}'.format(i, loss))
if i % 100 == 0:
print('Test accuracy after {} steps = {}'
.format(i, get_test_accuracy()))

tensorflow/contrib/imperative/imperative_graph.py

@@ -1,497 +0,0 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Imperative mode graph for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import uuid
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import compat
# Stateful operators (with ref type input/outputs) that are allowed to be
# present in an ImperativeGraph.
_REF_OPS_WHITELIST = frozenset(['Variable', 'VariableV2', 'Assign', 'AssignAdd',
'AssignSub', 'ScatterAdd', 'ScatterSub',
'ScatterUpdate'])
# These ops are returned as is in create_op without the extra logic. This
# saves some space used for auxiliary variables.
_PASS_THROUGH_OPS = frozenset(['Identity'])
class ImperativeGraph(ops.Graph):
"""A class implementing an imperative mode TensorFlow graph.
The ops constructed in an ImperativeGraph are augmented with extra logic to
enable its execution in an imperative manner. Imperative graphs are organized
hierarchically. A new step created from an `ImperativeMode` object creates a
new graph that is a child of this graph. In that case, an object of this
class is expected to be initialized with a parent graph, passed as
`parent_graph` to the initializer. Note that `parent_graph` is expected to
be set only when initialized from the `ImperativeMode` initializer.
"""
def __init__(self, parent_graph=None):
"""Initializes an ImperativeGraph.
Args:
parent_graph: (Optional) An ImperativeGraph.
"""
self._parent_graph = parent_graph
# Whether the create_op function should augment an op with extra logic for
# imperative execution.
self._return_as_is = False
# Operation -> list of Tensors map. Used for overriding the op.outputs
# property, useful during gradient computation.
self._outputs_map = {}
# Operation -> function map. Used for overriding the gradient function
# for an op.
self._gradient_function_map = {}
# Unique name for the graph. Used for naming the container in which
# temporary variables are placed.
self._name = uuid.uuid4().hex
# Names for op types used for marking ops so we can override their
# gradient functions.
self._merge_op_type = 'ImperativeMerge' + self._name
self._imperative_op_type = 'ImperativeOp' + self._name
# The list of 'assign' ops that initialize variables.
self._init_ops = []
# Names of variables whose init ops have been already recorded in _init_ops.
self._init_variable_names = set()
# A flag to indicate whether a variable and the corresponding initialization
# ops are being created. Typically set by the initializer of Variable class.
self._in_variable_creation = False
self._variable_cleanup_ops = []
# Call the parent's initializer.
super(ImperativeGraph, self).__init__()
# Register a simple 'pass through' function to be used for ops that have
# _merge_op_type as the _gradient_op_type attribute.
ops.RegisterGradient(self._merge_op_type)(
lambda op, grad, _: [grad] * len(op.inputs))
# For ops that have _imperative_op_grad as the _gradient_op_type attribute,
# temporarily replace their outputs with the values in _output_map before
# calling the original gradient function.
def _imperative_op_grad(op, *grad):
with self.replace_outputs(op):
return self._gradient_function_map[op.name](op, *grad)
ops.RegisterGradient(self._imperative_op_type)(_imperative_op_grad)
def op_in_graph(self, op):
"""Checks if op belongs in this graph or its ancestors."""
# pylint: disable=protected-access
if op._graph == self:
return True
# pylint: enable=protected-access
if self._parent_graph:
return self._parent_graph.op_in_graph(op)
return False
def is_child_graph(self, child_graph):
"""Checks if this graph is an ancestor of `child_graph`."""
# pylint: disable=protected-access
if not child_graph or not child_graph._parent_graph:
return False
if child_graph._parent_graph == self:
return True
return self.is_child_graph(child_graph._parent_graph)
# pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def record_variable_inits(self):
"""Context manager to record Variable initializations.
Sets _in_variable_creation to True before a Variable is initialized.
NOTE(keveman): This is used for recording the list of assign ops
that are used to initialize variables. It relies on the fact that
the constructor of Variable class creates exactly one assign op that is
used for initializing the variable. Variable ops not created using the
variables.Variable class are not added to _init_ops and hence not
initialized automatically.
"""
old_init = getattr(variables.Variable, '__init__')
def record(*args, **kwargs):
self._in_variable_creation = True
old_init(*args, **kwargs)
self._in_variable_creation = False
setattr(variables.Variable, '__init__', record)
yield
setattr(variables.Variable, '__init__', old_init)
# pylint: enable=g-doc-return-or-yield
@contextlib.contextmanager
def return_as_is(self):
"""Prevents adding the extra logic during `create_op`."""
old_return_as_is = self._return_as_is
self._return_as_is = True
yield
self._return_as_is = old_return_as_is
@contextlib.contextmanager
def replace_outputs(self, op):
"""Replaces the outputs of `op` with values recorded in `_outputs_map`."""
# pylint: disable=protected-access
old_outputs = op._outputs
op._outputs = self._outputs_map[op.name]
yield
op._outputs = old_outputs
# pylint: enable=protected-access
def add_pending_init(self, init_op):
"""Records assign ops in `_init_ops`."""
if init_op.type != 'Assign':
raise TypeError('Init op should be an Assign')
var_name = init_op.inputs[0].op.name
if var_name not in self._init_variable_names:
self._init_variable_names.add(var_name)
self._init_ops.append(init_op)
def run_pending_inits(self, session):
"""Runs the pending variable initializations using `session`."""
while self._init_ops:
session.run(self._init_ops.pop(0))
def _wrap(self, op):
return OperationProxy(op)
def create_op(self, *args, **kwargs):
"""Creates an `Operation`.
For operations of the following form
orig_value = op(*args, **kwargs)
this function constructs the following subgraph :
v = Variable()
if v is not initialized:
orig_value = op(*args, **kwargs)
v.assign(orig_value) # Initializes v
return orig_value
else:
return v
The above transformation is not performed and the original op is returned
as is if any of the following is true:
* `_return_as_is` flag is set to true.
* op_type is listed in _PASS_THROUGH_OPS
* op has no outputs.
* One of the op's return value has a ref type.
Args:
*args: Arguments for create_op()
**kwargs: Keyword arguments for create_op(). Refer to
tensorflow.python.framework.ops.Graph.create_op() for the mandatory
and optional arguments.
Returns:
An Operation.
Raises:
UnimplementedError: if output type is a reference and the op's type
is not one of the supported types in `_REF_OPS_WHITELIST`.
"""
op_type = kwargs['op_type'] if 'op_type' in kwargs else args[0]
output_dtypes = kwargs['dtypes'] if 'dtypes' in kwargs else args[2]
output_dtypes = [dtypes.as_dtype(d) for d in output_dtypes]
if self._return_as_is or op_type in _PASS_THROUGH_OPS:
return self._wrap(super(ImperativeGraph, self).create_op(*args, **kwargs))
if not output_dtypes:
return self._wrap(
super(ImperativeGraph, self).create_op(*args, **kwargs))
output_has_ref = any([dtype._is_ref_dtype for dtype in output_dtypes]) # pylint: disable=protected-access
if output_has_ref:
if op_type not in _REF_OPS_WHITELIST:
raise errors.UnimplementedError(None, None,
op_type + ' op not supported in '
'imperative graph')
ret = super(ImperativeGraph, self).create_op(*args, **kwargs)
if self._in_variable_creation:
if op_type == 'Assign':
self.add_pending_init(ret)
return self._wrap(ret)
with self.return_as_is():
# Declares the variables to hold the output values of this op.
op_output_var = [state_ops.variable_op_v2(
tensor_shape.TensorShape(None), dtype, container=self._name)
for dtype in output_dtypes]
# Ops to free the resources used by the temporary cache variables.
# The following two ops are created for each cache variable,
# having no control dependencies on any other ops :
# var_handle_op ----> destroy_resource_op
for dtype, v in zip(output_dtypes, op_output_var):
with ops.control_dependencies(None):
self._variable_cleanup_ops += [
gen_resource_variable_ops.destroy_resource_op(
gen_resource_variable_ops.var_handle_op(
dtype, tensor_shape.TensorShape(None),
container=self._name, shared_name=v.op.name),
ignore_lookup_error=True)]
# Create the conditional to run the original op only when the variable
# corresponding to the first output is not initialized.
inited = state_ops.is_variable_initialized(op_output_var[0])
v_f, v_t = control_flow_ops.ref_switch(op_output_var[0], inited)
# pylint: disable=protected-access
v_f_op = gen_array_ops._ref_identity(v_f)
v_t_op = gen_array_ops._ref_identity(v_t)
# pylint: enable=protected-access
with ops.control_dependencies([v_f_op.op]):
# Create the original op
orig_op = self._wrap(
super(ImperativeGraph, self).create_op(*args, **kwargs))
shapes = [val.get_shape() for val in orig_op.outputs]
controls = []
for var, val in zip(op_output_var, orig_op.outputs):
if (not val.get_shape().is_fully_defined() or
val.get_shape().num_elements() > 0):
assign_op = state_ops.assign(var, val, validate_shape=False)
assign_op.set_shape(val.get_shape())
controls.append(assign_op)
values = []
if len(controls) > 1:
if control_flow_ops.IsSwitch(orig_op):
# pylint: disable=protected-access
controls = gen_control_flow_ops._ref_merge(controls)
# pylint: enable=protected-access
else:
controls = control_flow_ops.tuple(controls)
for var, val in zip(op_output_var, orig_op.outputs):
with ops.control_dependencies(controls):
with self.colocate_with(v_f_op):
real_val = array_ops.identity(val)
with ops.control_dependencies([v_t_op.op]):
with self.colocate_with(v_t_op):
stored_val = array_ops.identity(var)
stored_val.set_shape(val.get_shape())
real_val, _ = control_flow_ops.merge([real_val, stored_val])
real_val.op.node_def.attr['_gradient_op_type'].CopyFrom(
attr_value_pb2.AttrValue(s=compat.as_bytes(self._merge_op_type)))
values.append(real_val)
for i, _ in enumerate(shapes):
values[i].set_shape(shapes[i])
self._outputs_map[orig_op.name] = values
try:
self._gradient_function_map[orig_op.name] = ops.get_gradient_function(
orig_op)
except (KeyError, LookupError):
pass
else:
orig_op.node_def.attr['_gradient_op_type'].CopyFrom(
attr_value_pb2.AttrValue(
s=compat.as_bytes(self._imperative_op_type)))
return MultiOutputOperation(values, orig_op)
class MultiOutputOperation(object):
"""A 'duck-type' wrapper class for a list of Tensors, acting as an Operation.
NOTE(keveman): `create_op` produces a list of values but collected from
multiple ops. So there is no one `Operation` that we can pass to the consumers
of `create_op`. But the consumers of `create_op` only require the object
passed in to have the `outputs` property and get_attr method defined. This
class simply defines the `outputs` property, so the consumers of `create_op`
work correctly.
"""
def __init__(self, outputs, op):
self.outputs = outputs
self._op = op
def get_attr(self, name):
return self._op.get_attr(name)
class OperationProxy(ops.Operation):
"""A proxy for the `ops.Operation` class.
Imperative graphs are organized hierarchically. Operations in an imperative
graph can be constructed out of operations belonging to any of the parent
graphs available in the lexical scope. This class provides the illusion that
all such operations belong to the current default graph.
"""
__slots__ = ['_name', '_original_graph']
def __init__(self, real_op):
# object.__setattr__ is used for setting '_name' and '_original_graph'
# attributes (instead of self._name, for eg.) as this class provides
# its own __setattr__ method for proxying purposes.
object.__setattr__(self, '_name', real_op.name)
object.__setattr__(self, '_original_graph', real_op.graph)
# pylint: disable=protected-access
for output in real_op._outputs:
output._op = self
real_op._outputs = [TensorProxy(output) for output in real_op._outputs]
# pylint: enable=protected-access
def __getattribute__(self, name):
"""Forwards to the methods in the current graph's `Operation` object."""
op_name = object.__getattribute__(self, '_name')
graph = ops.get_default_graph()
# Short-circuit getting some of these attributes that are readily
# available without forwarding to the actual operation. This is done
# because `get_operation_by_name` tries to acquire the parent graph's
# lock protecting the nodes_by_* data structures, and these attributes
# (not requiring the lock) could be queried by other function holding
# the lock.
if name == 'name':
return op_name
elif name == '_as_graph_element':
return lambda: self
elif name == '__class__':
return OperationProxy
elif name == 'graph':
original_graph = object.__getattribute__(self, '_original_graph')
if original_graph.is_child_graph(graph):
return graph
else:
return original_graph
else:
op = graph.get_operation_by_name(op_name)
return getattr(op, name)
def __setattr__(self, name, value):
# `replace_outputs` overrides _outputs temporarily, so support
# setting that attribute.
if name != '_outputs':
raise NotImplementedError('"op.%s = ..." not implemented' % name)
op_name = object.__getattribute__(self, '_name')
graph = ops.get_default_graph()
op = graph.get_operation_by_name(op_name)
setattr(op, name, value)
class TensorProxy(ops.Tensor):
"""Forwards to the methods in the current graph's `Tensor` object."""
__slots__ = ['_name', '_original_tensor', '_original_graph']
def __init__(self, real_tensor):
setattr(self, '_name', real_tensor.name)
setattr(self, '_original_tensor', real_tensor)
setattr(self, '_original_graph', real_tensor.graph)
def __str__(self):
sess = getattr(ops.Tensor, 'session', None)
if sess:
return str(sess.run(self))
else:
return ops.Tensor.__str__(self)
def __repr__(self):
sess = getattr(ops.Tensor, 'session', None)
if sess:
return repr(sess.run(self))
else:
return ops.Tensor.__repr__(self)
def __bool__(self):
sess = getattr(ops.Tensor, 'session', None)
if sess:
return bool(sess.run(self))
else:
return ops.Tensor.__bool__(self)
def __nonzero__(self):
sess = getattr(ops.Tensor, 'session', None)
if sess:
return bool(sess.run(self))
else:
return ops.Tensor.__nonzero__(self)
def __getattribute__(self, name):
tensor_name = object.__getattribute__(self, '_name')
graph = ops.get_default_graph()
if name == 'name':
return tensor_name
elif name == '_as_graph_element':
return lambda: self
elif name == '__class__':
return TensorProxy
elif name == 'graph':
original_graph = object.__getattribute__(self, '_original_graph')
if original_graph.is_child_graph(graph):
return graph
else:
return original_graph
elif name == 'value':
sess = getattr(ops.Tensor, 'session', None)
if sess:
return sess.run(self)
raise AttributeError('Current session not set on Tensor')
else:
tensor = object.__getattribute__(
graph.get_tensor_by_name(tensor_name), '_original_tensor')
return getattr(tensor, name)
@contextlib.contextmanager
def add_session_attr(typename, session):
"""Sets the `session` property on the typename for the duration of a context.
This allows us to convert a `tf.Tensor` to numpy array by calling run()
using the `.session` property.
Args:
typename: The class to which value attribute should be added.
session: Session to be stored.
Yields:
None.
"""
old_session = getattr(typename, 'session', None)
setattr(typename, 'session', session)
yield
if old_session:
setattr(typename, 'session', old_session)

tensorflow/contrib/imperative/imperative_mode.py

@@ -1,154 +0,0 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Imperative mode for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.imperative import imperative_graph
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
class ImperativeMode(object):
"""Imperative mode execution of TensorFlow graphs.
This class is a container for an ImperativeGraph, a session, and other
context managers that enable imperative mode execution. The following is
the common usage pattern:
```python
server = tf.train.Server.create_local_server()
with ImperativeMode(server.target):
a = tf.random_normal([])
b = tf.random_normal([])
c = a + b
c_val = c.value
d = c + 1.0
d_val = d.value
# Expect d_val == c_val + 1.0
```
ImperativeMode provides the illusion of immediate execution. It still
constructs a graph and defers op execution. But when an op executes for
the first time, its results are cached and the cached value is returned for
future executions. The __exit__ method clears this graph and cached values.
To use ImperativeMode inside a loop, the `new_step` method can be used to
create a temporary context around the loop body to clear the cache at loop
exit as follows:
```python
server = tf.train.Server.create_local_server()
with ImperativeMode(server.target) as mode:
w = tf.get_variable('w', [])
for i in range(10):
with mode.new_step():
x = tf.random_uniform([])
y = tf.random_uniform([])
z = w.assign_add(x + y)
print(z.value)
```
ImperativeMode graph does not support all TensorFlow operations and features.
Here are the current known limitations of ImperativeMode :
* Stateful operations returned ref-typed tensors are limited to
TensorFlow Variables and the associated operations. Data structures such as
queues barriers, etc. are not supported in ImperativeMode.
* Variables created and managed via `tf.variable_scope` and the associated
`tf.get_variable` are not supported. (These use auxiliary data structures in
addition to the graph, which are not aware of the imperative mode execution.)
TODO(keveman): Remove the above restrictions on ImperativeMode.
"""
def __init__(self, target, parent_graph=None):
"""Initializes an ImperativeMode.
Args:
target: The TensorFlow execution engine to connect to.
parent_graph: (Optional) An ImperativeGraph.
Raises:
UnimplementedError: if non-None parent_graph is not an ImperativeGraph.
"""
self._target = target
self._parent_graph = parent_graph
# Create a new graph
self._graph = imperative_graph.ImperativeGraph(
parent_graph=self._parent_graph)
self._default_graph = self._graph.as_default()
# Context manager to record variable inits
self._record_variable_inits = self._graph.record_variable_inits()
if self._parent_graph:
if not isinstance(self._parent_graph, imperative_graph.ImperativeGraph):
raise errors.UnimplementedError(None, None, 'ImperativeMode needs an '
'ImperativeGraph')
# Clone the `_parent_graph` in to the current graph. This is so that
# operations used from the enclosing ImperativeMode context are
# available in the current context.
with self._graph.as_default(), self._graph.return_as_is():
importer.import_graph_def(self._parent_graph.as_graph_def(), name='')
self._session = session.Session(graph=self._graph, target=self._target)
# Override the `_session`'s run, so that variable inits can be
# called before the actual run.
self._old_run = self._session.run
self._session.run = self.run
self._context_managers = [
self._session.as_default(),
self._default_graph,
self._record_variable_inits,
imperative_graph.add_session_attr(ops.Tensor, self._session)]
def run(self, *args, **kwargs):
"""Runs the variable init ops before calling the original run method."""
self._graph.run_pending_inits(self._session)
ret = self._old_run(*args, **kwargs)
return ret
def __enter__(self):
"""Enters the runtime contexts of the `_context_managers`."""
for c in self._context_managers:
c.__enter__()
return self
def __exit__(self, exec_type, exec_value, exec_tb):
"""Cleans up resources, exits the runtime contexts in reverse order."""
# pylint: disable=protected-access
if self._graph._variable_cleanup_ops:
self._session.run(self._graph._variable_cleanup_ops)
# pylint: enable=protected-access
self._session.close()
for c in reversed(self._context_managers):
c.__exit__(exec_type, exec_value, exec_tb)
def new_step(self):
"""Returns a new 'child' ImperativeMode.
`new_step` enables running the imperative mode inside a Python loop. The
ImperativeGraph object and the tensors created and cached during the
execution of that graph are destroyed when the context entered with the
object returned from this function is 'exited'. However, the operations
in `self._graph` and any of its ancestors can be freely used as
operands to operations in the graph contained in the object returned
by this function.
Returns:
A new ImperativeMode object.
"""
self._graph.run_pending_inits(self._session)
return ImperativeMode(self._target, parent_graph=self._graph)

tensorflow/contrib/imperative/imperative_test.py

@@ -1,202 +0,0 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for imperative mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.imperative import imperative_graph
from tensorflow.contrib.imperative import imperative_mode
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import training
FLAGS = flags.FLAGS
class ImperativeTest(test.TestCase):
def setUp(self):
self._server = training.Server.create_local_server()
self._target = self._server.target
def testBasic(self):
"""Tests basic functionality.
Fetching the value of `d` with `d.value` will evaluate `c` again
in non-imperative mode. However, in imperative mode, `c` should
have the value it had when it was first evaluated with `c.value`.
"""
with imperative_mode.ImperativeMode(self._target):
a = random_ops.random_normal([])
b = random_ops.random_normal([])
c = a + b
c_val = c.value
d = c + 1.0
d_val = d.value
self.assertAllClose(c_val + 1.0, d_val)
def testExpGrad(self):
"""Tests gradients."""
with imperative_mode.ImperativeMode(self._target):
x = variables.Variable(np.random.rand(1, 3))
x_init = x.value().value
y = math_ops.exp(x)
dy = gradients_impl.gradients(y, x)
self.assertAllClose(np.exp(x_init), y.value)
# dy/dx should be equal to y (= exp(x))
self.assertAllClose(y.value, dy[0].value)
def testLoopGrads(self):
"""Tests gradients in the presence of Python loops."""
with imperative_mode.ImperativeMode(self._target):
w = variables.Variable(np.eye(3))
x = constant_op.constant(np.eye(3))
for _ in range(3):
x = math_ops.add(x, w)
y = gradients_impl.gradients(x, w)
self.assertAllClose(y[0].value, np.array([3.] * 9).reshape(3, 3))
def testVariable(self):
"""Makes sure that variables can be evaluated before running initializer."""
with imperative_mode.ImperativeMode(self._target):
x = variables.Variable(1, name='xy')
self.assertEqual(x.value().value, 1)
x = x.assign_add(41)
self.assertEqual(x.value, 1 + 41)
y = variables.Variable(3, name='y')
self.assertEqual(y.value().value, 3)
def testNewStep(self):
"""Tests the `new_step` functionality."""
with imperative_mode.ImperativeMode(self._target) as mode:
for _ in range(4):
with mode.new_step() as step:
a = random_ops.random_uniform([])
a_init = a.value
for _ in range(4):
with step.new_step():
# Values coming from outside this step's scope should not
# be changing.
self.assertEqual(a.value, a_init)
b = a + random_ops.random_uniform([], minval=0.1)
self.assertGreaterEqual(b.value, a.value)
def testGradientThroughNewStep(self):
with imperative_mode.ImperativeMode(self._target) as mode:
x = constant_op.constant(np.random.rand(3))
y = math_ops.tanh(x)
with mode.new_step():
z = constant_op.constant(np.random.rand(3))
w = math_ops.multiply(y, z)
dx = gradients_impl.gradients(w, x)
self.assertAllClose(dx[0].value, z.value * (1.0 - y.value ** 2))
def testEscape(self):
"""Makes sure that values don't escape a `new_step` scope."""
with imperative_mode.ImperativeMode(self._target) as mode:
x = constant_op.constant(1)
with mode.new_step():
y = math_ops.add(x, constant_op.constant(3))
self.assertEqual(y.value, 4)
with mode.new_step():
with imperative_graph.add_session_attr(ops.Tensor, None):
with self.assertRaises(KeyError):
_ = y + constant_op.constant(1)
def testZeroSized(self):
"""Tests evaluating zero-sized tensors."""
with imperative_mode.ImperativeMode(self._target):
x = constant_op.constant(1)
y = array_ops.shape(x)
self.assertEqual(list(y.value), [])
def testTrainingLoop(self):
with imperative_mode.ImperativeMode(self._target) as mode:
w = variables.Variable(np.random.rand(3))
x = constant_op.constant(np.random.rand(3))
y = math_ops.multiply(x, w)
dw = gradients_impl.gradients(y, w)
self.assertAllClose(dw[0].value, x.value)
for _ in range(3):
with mode.new_step():
x = constant_op.constant(np.random.rand(3))
y = math_ops.multiply(x, w)
dw = gradients_impl.gradients(y, w)
self.assertAllClose(dw[0].value, x.value)
def testUseAfterNewStep(self):
with imperative_mode.ImperativeMode(self._target) as mode:
x = constant_op.constant(1)
self.assertAllClose(x.value, 1)
with mode.new_step():
pass
self.assertAllClose(x.value, 1)
def testStringify(self):
with imperative_mode.ImperativeMode(self._target):
np_a = np.random.rand(2, 2)
a = constant_op.constant(np_a)
self.assertEqual(str(a), str(np_a))
def testBoolCoercion(self):
with imperative_mode.ImperativeMode(self._target):
self.assertFalse(not constant_op.constant([1.0]))
with self.assertRaises(ValueError) as ve:
_ = not constant_op.constant(np.random.rand(2))
self.assertTrue('The truth value of an array with'
' more than one element is ambiguous.'
' Use a.any() or a.all()' in str(ve.exception))
def testMeanGrad(self):
with imperative_mode.ImperativeMode(self._target):
x = constant_op.constant([1.0, 2.0])
y = math_ops.reduce_mean(x)
dy = gradients_impl.gradients(y, x)[0]
self.assertAllEqual(dy.value, [0.5, 0.5])
def testVarUseInNewStep(self):
with imperative_mode.ImperativeMode(self._target) as mode:
x = variables.Variable(1.0)
with mode.new_step():
self.assertEqual(array_ops.identity(x).value, 1.0)
def testVarChange(self):
with imperative_mode.ImperativeMode(self._target) as mode:
x = variables.Variable(constant_op.constant(1.0))
for i in range(10):
with mode.new_step() as step:
step.run(state_ops.assign_sub(x, 0.1))
self.assertAllClose(array_ops.identity(x).value, 1.0 - (i + 1) * 0.1)
if __name__ == '__main__':
FLAGS.rpc_default_rate_acl = 'INSECURE'
test.main()