Minor eager mode improvements

Mainly avoids fetching the full tensor shape when the rank alone will suffice.
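As a rough illustration of the difference (a minimal sketch using the TF 1.x eager API of this commit's era, not code from the commit itself): materializing the full shape builds a Python list of every dimension, while the rank is a single cached integer.

import tensorflow as tf

tf.enable_eager_execution()

x = tf.zeros([64, 32, 16])

# Fetching the full shape materializes all dimensions as a Python list...
ndims = len(x.get_shape().as_list())   # builds [64, 32, 16] first

# ...but when only the rank is needed, it is available directly.
ndims = x.shape.ndims                  # 3, no list construction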

Before:
entry {
  name: "EagerLinearRegressionBenchmark.eager_train_cpu"
  iters: 2000
  wall_time: 1.54617094994
  extras {
    key: "examples_per_sec"
    value {
      double_value: 82785.1538701
    }
  }
}

After:
entry {
  name: "EagerLinearRegressionBenchmark.eager_train_cpu"
  iters: 2000
  wall_time: 1.38547611237
  extras {
    key: "examples_per_sec"
    value {
      double_value: 92387.0132856
    }
  }
}
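Taken together, the two entries work out to roughly a 10% win; a quick check of the numbers above:

# Speedup implied by the benchmark entries above.
throughput_gain = 92387.0132856 / 82785.1538701   # ~1.116, i.e. ~11.6% more examples/sec
wall_time_ratio = 1.38547611237 / 1.54617094994   # ~0.896, i.e. ~10.4% less wall time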
PiperOrigin-RevId: 204001196
Akshay Modi 2018-07-10 13:26:13 -07:00 committed by TensorFlower Gardener
parent 1882427291
commit dd1ce0fd8f
5 changed files with 30 additions and 14 deletions

@@ -1898,10 +1898,10 @@ PyObject* RecordGradient(PyObject* op_name, PyObject* inputs, PyObject* attrs,
 void MaybeWatchVariable(PyObject* input) {
   DCHECK(CheckResourceVariable(input));
-  DCHECK(PyObject_HasAttrString(input, "trainable"));
+  DCHECK(PyObject_HasAttrString(input, "_trainable"));
   tensorflow::Safe_PyObjectPtr trainable(
-      PyObject_GetAttrString(input, "trainable"));
+      PyObject_GetAttrString(input, "_trainable"));
   if (trainable.get() == Py_False) return;
   TFE_Py_TapeSetWatchVariable(input);
 }
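The switch from the public trainable property to the underlying _trainable attribute presumably exists to skip a Python property call on this hot path; a micro-benchmark of that cost (the Var class here is hypothetical, standing in for a resource variable):

import timeit

class Var(object):
  def __init__(self):
    self._trainable = True

  @property
  def trainable(self):
    return self._trainable

v = Var()
# Raw attribute access skips the property descriptor's function call.
print(timeit.timeit(lambda: v._trainable, number=10**6))
print(timeit.timeit(lambda: v.trainable, number=10**6))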

@@ -28,6 +28,18 @@ from tensorflow.python.framework import tensor_shape
 from tensorflow.python.framework import tensor_util


+def has_fully_defined_shape(tensor):
+  """Returns true if tensor has a fully defined shape."""
+  return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()
+
+
+def rank(tensor):
+  """Return a rank if it is a tensor, else return None."""
+  if isinstance(tensor, ops.Tensor):
+    return tensor._rank()  # pylint: disable=protected-access
+  return None
+
+
 def scalar_shape(unused_op):
   """Shape function for ops that output a scalar value."""
   return [tensor_shape.scalar()]
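A quick sketch of how the two helpers added above behave; common_shapes is an internal module, so this is illustrative rather than supported API (the placeholder case is graph mode):

import tensorflow as tf
from tensorflow.python.framework import common_shapes

t = tf.placeholder(tf.float32, shape=[None, 10])
print(common_shapes.rank(t))                      # 2: rank is known even with an unknown batch dim
print(common_shapes.has_fully_defined_shape(t))   # False: dim 0 is None
print(common_shapes.rank([1, 2, 3]))              # None: not an ops.Tensor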

@@ -685,8 +685,8 @@ class Layer(checkpointable.CheckpointableBase):
     # Handle Keras mask propagation from previous layer to current layer.
     previous_mask = None
-    if (not hasattr(self, '_compute_previous_mask') or
-        self._compute_previous_mask):
+    if build_graph and (not hasattr(self, '_compute_previous_mask') or
+                        self._compute_previous_mask):
       previous_mask = collect_previous_mask(inputs)
     if not hasattr(self, '_call_fn_args'):
       self._call_fn_args = self._no_dependency(
@@ -726,6 +726,7 @@ class Layer(checkpointable.CheckpointableBase):
         if all(hasattr(x, 'shape') for x in input_list):
           input_shapes = nest.map_structure(lambda x: x.shape, inputs)
         self.build(input_shapes)
+        self.built = True
       # Check input assumptions set after layer building, e.g. input shape.
       if build_graph or in_deferred_mode:
@@ -761,8 +762,6 @@ class Layer(checkpointable.CheckpointableBase):
       if in_deferred_mode or build_graph and have_all_keras_metadata(inputs):
         inputs, outputs = self._set_connectivity_metadata_(
            inputs, outputs, args, kwargs)
-      self.built = True
       if context.executing_eagerly():
         return outputs
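The net effect of the base_layer changes: built is flipped as soon as build() returns rather than at the end of __call__, and mask collection is skipped entirely in eager mode (build_graph is False). The externally visible lifecycle should be unchanged; a minimal check, assuming eager mode:

import tensorflow as tf

tf.enable_eager_execution()

layer = tf.keras.layers.Dense(4)
print(layer.built)         # False: build() has not run yet
layer(tf.zeros([2, 3]))    # first call triggers build()
print(layer.built)         # True: now set immediately after build()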

@@ -26,6 +26,7 @@ import warnings
 import numpy as np

 from tensorflow.python.eager import context
+from tensorflow.python.framework import common_shapes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import tensor_shape
 from tensorflow.python.keras import activations
@@ -929,13 +930,13 @@ class Dense(Layer):
   def call(self, inputs):
     inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
-    shape = inputs.get_shape().as_list()
-    if len(shape) > 2:
+    rank = common_shapes.rank(inputs)
+    if rank > 2:
       # Broadcasting is required for the inputs.
-      outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
-                                                             [0]])
+      outputs = standard_ops.tensordot(inputs, self.kernel, [[rank - 1], [0]])
       # Reshape the output back to the original ndim of the input.
       if not context.executing_eagerly():
+        shape = inputs.get_shape().as_list()
         output_shape = shape[:-1] + [self.units]
         outputs.set_shape(output_shape)
     else:
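With this change, Dense.call only needs the input's rank, not its full shape, to choose the broadcasting path (the shape is still fetched, but only on the graph-mode branch where set_shape needs it). A sketch of the rank > 2 case it optimizes:

import tensorflow as tf

tf.enable_eager_execution()

layer = tf.keras.layers.Dense(5)
x = tf.zeros([2, 3, 4])    # rank 3 > 2, so the tensordot branch is taken
y = layer(x)
print(y.shape)             # (2, 3, 5): only the last axis is mapped to units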

@@ -651,6 +651,9 @@ def cast(x, dtype, name=None):
     TypeError: If `x` cannot be cast to the `dtype`.
   """
   base_type = dtypes.as_dtype(dtype).base_dtype
+  if isinstance(x,
+                (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
+    return x
   with ops.name_scope(name, "Cast", [x]) as name:
     if isinstance(x, sparse_tensor.SparseTensor):
       values_cast = cast(x.values, base_type, name=name)
@@ -1222,8 +1225,9 @@ def _ReductionDims(x, axis, reduction_indices):
     return axis
   else:
     # Fast path: avoid creating Rank and Range ops if ndims is known.
-    if isinstance(x, ops.Tensor) and x._rank() is not None:  # pylint: disable=protected-access
-      return constant_op.constant(np.arange(x._rank()), dtype=dtypes.int32)  # pylint: disable=protected-access
+    rank = common_shapes.rank(x)
+    if rank is not None:
+      return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
     if (isinstance(x, sparse_tensor.SparseTensor) and
         x.dense_shape.get_shape().is_fully_defined()):
       rank = x.dense_shape.get_shape()[0].value  # sparse.dense_shape is 1-D.
@@ -1234,8 +1238,8 @@ def _ReductionDims(x, axis, reduction_indices):
 def _may_reduce_to_scalar(keepdims, axis, reduction_indices, output):
-  """Set a reduction's output's shape to be a scalar if we are certain."""
-  if (not output.shape.is_fully_defined()) and (not keepdims) and (
+  """Set a reduction's output shape to be a scalar if we are certain."""
+  if not common_shapes.has_fully_defined_shape(output) and (not keepdims) and (
       axis is None) and (reduction_indices is None):
     output.set_shape(())
   return output
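Both math_ops fast paths are visible from the public API; for instance, casting a tensor to its own dtype should now return the input unchanged (a small sketch, assuming eager mode):

import tensorflow as tf

tf.enable_eager_execution()

x = tf.constant([1.0, 2.0])
y = tf.cast(x, tf.float32)   # dtype already matches: the new fast path returns x itself
print(y is x)                # True with this change

# Likewise, reducing over all axes of a tensor with known rank no longer
# needs Rank and Range ops to enumerate the reduction dims.
print(tf.reduce_sum(tf.zeros([2, 3])))   # scalar 0.0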