Comment and error message consistency cleanup.

PiperOrigin-RevId: 168422582
Henry Tan 2017-09-12 12:38:41 -07:00 committed by TensorFlower Gardener
parent 7c19b82af4
commit cd377811d1
8 changed files with 33 additions and 32 deletions


@@ -88,14 +88,14 @@ def _magic_gradient_function(op_name, attr_tuple, num_inputs, num_outputs,
   Args:
     op_name: the name of the op to be differentiated.
-    attr_tuple: the attrs, as a tuple
-    num_inputs: the number of inputs to the op
-    num_outputs: the number of outputs of the op
+    attr_tuple: the attrs, as a tuple.
+    num_inputs: the number of inputs to the op.
+    num_outputs: the number of outputs of the op.
     *tensors: a list of tensors, composed of, in order, the inputs, the outputs,
       and the gradients with respect to the outputs.

   Returns:
-    the gradients with respect to the inputs of the function, as a list.
+    The gradients with respect to the inputs of the function, as a list.
   """
   inputs = tensors[:num_inputs]
   outputs = tensors[num_inputs:num_inputs + num_outputs]
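For reference, the `*tensors` layout this docstring describes can be sketched with plain Python slicing (a minimal illustration; the string stand-ins are hypothetical placeholders for real tensors):

```python
# Flat layout: inputs, then outputs, then gradients w.r.t. the outputs.
num_inputs, num_outputs = 2, 1
tensors = ["x0", "x1", "y0", "dy0"]  # hypothetical stand-ins for tensors

inputs = tensors[:num_inputs]                           # ["x0", "x1"]
outputs = tensors[num_inputs:num_inputs + num_outputs]  # ["y0"]
out_grads = tensors[num_inputs + num_outputs:]          # ["dy0"]
```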
@@ -232,9 +232,9 @@ def implicit_val_and_grad(f):
     ag_core.active_progenitors.remove(start_node)
     if not ag_core.isnode(end_node):
       raise ValueError(
-          "Target not part of a computation being traced. %s" % end_node)
+          "Target not part of a computation being traced. %s." % end_node)
     if start_node not in end_node.progenitors:
-      raise ValueError("Target not derived from source. %s %s" %
+      raise ValueError("Target not derived from source. %s %s." %
                        (end_node.progenitors, repr(start_node)))
     output_gradients = kwds.get("output_gradients", None)
     if output_gradients is None:
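As context for the two `ValueError`s above, a hedged sketch of the calling convention of `implicit_val_and_grad` (the loss function and input are illustrative; the exact eager API of this 2017 snapshot may differ):

```python
def loss_fn(x):
  # The returned value must be derived from the traced computation,
  # otherwise the ValueErrors above are raised.
  return x * x

val_and_grad_fn = implicit_val_and_grad(loss_fn)
loss, grads = val_and_grad_fn(3.0)  # loss value plus implicit gradients
```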
@@ -282,7 +282,7 @@ def _get_arg_spec(f, params):
     return params
   else:
     raise ValueError(
-        "params must be all strings or all integers; got %s" % params)
+        "params must be all strings or all integers; got %s." % params)


 def gradients_function(f, params=None):
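The `params` check above accepts either all argument names or all positional indices; a brief hypothetical sketch of `gradients_function` usage:

```python
def f(x, y):
  return x * x + y * y

df = gradients_function(f)                  # gradients w.r.t. all arguments
df_x = gradients_function(f, params=[0])    # indices: only w.r.t. x
df_y = gradients_function(f, params=["y"])  # names: only w.r.t. y
# Mixing names and indices, e.g. params=["x", 1], raises the ValueError above.
```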


@@ -286,8 +286,8 @@ class Context(object):
           it is unset.
         `attrs` contains the attributes of the operation as a `tuple` of
           alternating attribute names and attribute values.
-        `inputs` is the `list` of input `tfe.Tensor`(s) to the op.
-        `outputs` is the `list` of output `tfe.Tensor`(s) from the op.
+        `inputs` is the `list` of input `Tensor`(s) to the op.
+        `outputs` is the `list` of output `Tensor`(s) from the op.
         Return value(s) from the callback are ignored.
     """
     # TODO(cais): (b/64674139) Allow access to function-internal operations.
@@ -314,7 +314,7 @@ def _initialize_context():


 def context():
-  """Returns a singleton Context object."""
+  """Returns a singleton context object."""
   if _context is None:
     _initialize_context()
   return _context
@@ -373,7 +373,7 @@ def device(name):
   ```python
   with tfe.device('gpu:0'):
     with tfe.device('cpu:0'):
-      shape = tfe.Tensor([], dtype=tf.int32)
+      shape = Tensor([], dtype=tf.int32)
       x = ops.truncated_normal(shape, tf.float32)
   ```
   will ensure that the `shape` Tensor is on CPU but the `truncated_normal`
@@ -390,13 +390,13 @@ def device(name):


 def run(main=None, argv=None):
-  """Runs the program with an optional 'main' function and 'argv' list.
+  """Runs the program with an optional main function and argv list.

   The program will run with eager execution enabled.

   Args:
-    main: the main function to run
-    argv: the arguments to pass to it
+    main: the main function to run.
+    argv: the arguments to pass to it.
   """
   enable_eager_execution()
   app.run(main, argv)
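A minimal sketch of `run` as an eager-mode counterpart to `tf.app.run` (the `main` body is illustrative):

```python
def main(argv):
  # Runs with eager execution already enabled by run() below.
  print("args:", argv)

if __name__ == "__main__":
  run(main)
```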


@@ -38,7 +38,7 @@ def custom_gradient(f):
   """Decorator to define a function with a custom gradient.

   The input function is expected to return the tuple
-    (results, gradient_function)
+    (results, gradient_function).

   The output function will return results while possibly recording the
   gradient_function and inputs in the tape.
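A hypothetical example consistent with the contract above, where the wrapped function returns `(results, gradient_function)` (assuming standard `tf` ops are usable on eager tensors in this snapshot):

```python
import tensorflow as tf

@custom_gradient
def log1pexp(x):
  e = tf.exp(x)
  def grad(dy):
    # Maps gradients w.r.t. the result to gradients w.r.t. x.
    return dy * (1 - 1 / (1 + e))
  return tf.log(1 + e), grad  # (results, gradient_function)
```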


@@ -153,9 +153,10 @@ def make_shape(v, arg_name):
   try:
     shape = tensor_shape.as_shape(v)
   except TypeError as e:
-    raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
+    raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e))
   except ValueError as e:
-    raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
+    raise ValueError("Error converting %s to a TensorShape: %s." % (arg_name,
+                                                                    e))
   if shape.ndims is None:
     return None
   else:
@@ -171,7 +172,7 @@ def make_tensor(v, arg_name):
     text_format.Merge(v, pb)
     return pb
   raise TypeError(
-      "Don't know how to convert %s to a TensorProto for argument '%s'" %
+      "Don't know how to convert %s to a TensorProto for argument '%s'." %
       (repr(v), arg_name))
@@ -217,7 +218,7 @@ def args_to_mixed_eager_tensors(lists):
   for l in lists[1:]:
     if len(l) != len(lists[0]):
       raise ValueError(
-          "Expected list arguments to be the same length: %d != %d (%r vs. %r)"
+          "Expected list arguments to be the same length: %d != %d (%r vs. %r)."
           % (len(lists[0]), len(l), lists[0], l))
     lists_ret.append([])


@@ -228,8 +228,8 @@ def add_execution_callback(callback):
         it is unset.
       `attrs` contains the attributes of the operation as a `tuple` of
         alternating attribute name and attribute value.
-      `inputs` is the `list` of input `tfe.Tensor`(s) to the op.
-      `outputs` is the `list` of output `tfe.Tensor`(s) from the op.
+      `inputs` is the `list` of input `Tensor`(s) to the op.
+      `outputs` is the `list` of output `Tensor`(s) from the op.
       Return value(s) from the callback are ignored.
   """
   context.get_default_context().add_post_execution_callback(callback)
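A hedged sketch of registering a callback, assuming it receives `(op_type, op_name, attrs, inputs, outputs)` as the surrounding docstring suggests (the callback name is illustrative):

```python
def log_op_callback(op_type, op_name, attrs, inputs, outputs):
  # Invoked after each eagerly executed op; return values are ignored.
  print("executed:", op_type)

add_execution_callback(log_op_callback)
```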
@@ -246,8 +246,8 @@ def seterr(inf_or_nan=None):
   Example:
   ``` python
   tfe.seterr(inf_or_nan="raise")
-  a = tfe.Tensor(10.0)
-  b = tfe.Tensor(0.0)
+  a = Tensor(10.0)
+  b = Tensor(0.0)
   c = a / b  # <-- Raises InfOrNanError.

   tfe.seterr(inf_or_nan="ignore")


@@ -41,7 +41,7 @@ from tensorflow.python.util import nest
 # Thread-local storage for tfe Tensors which are referenced while evaluating a
 # graph-mode function.
 _scoped_captures = threading.local()
-# _scoped_captures.tensors is either None or a map from tfe.Tensor id to a pair
+# _scoped_captures.tensors is either None or a map from Tensor id to a pair
 # of a tfe tensor and its corresponding placeholder to pass as a function
 # argument. The value should be None unless we're in function definition
 # context.
@@ -62,7 +62,7 @@ def _convert_to_graph_tensor(value, dtype=None, name=None, as_ref=False):
   """Captures a Tensor while building a graph mode function.

   Arguments:
-    value: A tfe.Tensor object
+    value: A Tensor object.
     dtype: The datatype of the value produced by the node in the graph.
     name: Name of the node in the graph.
     as_ref: Ignored (required by register_tensor_conversion_function).
@@ -482,12 +482,12 @@ def defun(func):
   func must be a Python function that constructs a TensorFlow graph,
   typically using functions in the tensorflow module.

-  Arguments to func can be either tfe.Tensor objects or Python
+  Arguments to func can be either Tensor objects or Python
   objects. Non-Tensor python objects are treated as constants, and new function
   definitions are created internally based on their values.

-  func must return a tf.Tensor (NOT a tfe.Tensor) or a list of tf.Tensor (NOT a
-  tfe.Tensor). TODO(apassos) make the wrapped tfe ops return tf.Tensors when in
+  func must return a tf.Tensor (NOT a Tensor) or a list of tf.Tensor (NOT a
+  Tensor). TODO(apassos) make the wrapped tfe ops return tf.Tensors when in
   graph mode.

   TODO(apassos): deal with captured global state. Deal with control flow.
@@ -497,6 +497,6 @@ def defun(func):
   Returns:
     A callable that will execute the compiled function (and return zero
-    or more tfe.Tensor objects)
+    or more Tensor objects).
   """
   return named_defun(func, func.__name__)
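A brief hypothetical usage of `defun`, matching the docstring's contract (graph-building `tf` ops, with Tensor or Python-constant arguments):

```python
import tensorflow as tf

def f(x):
  return tf.square(x)  # graph-building op

g = defun(f)  # compiles f; returns a callable executing the graph function
y = g(2.0)    # Python constants are treated as constants in the graph
```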


@@ -99,7 +99,7 @@ class TargetTest(test_util.TensorFlowTestCase):
     # with tfe.device('/gpu:0'):
     #   ...  # code here
     #   with tfe.device('/cpu:0'):
-    #     shape = tfe.Tensor(...)
+    #     shape = Tensor(...)
     #   y = tfe.ops.random_uniform(.., shape)
     #
     # Without the CPU device block tfe.ops.random_uniform would fail since the
@@ -108,7 +108,7 @@ class TargetTest(test_util.TensorFlowTestCase):
     # After this change, we simplify the code:
     #
     # with tfe.device('/gpu:0'):
-    #   y = tfe.ops.random_uniform(, tfe.Tensor(...))
+    #   y = tfe.ops.random_uniform(, Tensor(...))
     #
     # The approximation is not exact since if there are GPU kernels which do not
     # require host memory for int32 tensors, there will be a discrepancy between


@@ -64,7 +64,7 @@ PyObject* TFE_Py_RegisterExceptionClass(PyObject* e);
 // class registered via TFE_Py_RegisterExceptionClass) and returns -1.
 int TFE_Py_MayBeRaiseException(TF_Status* status);

-// Returns the string associated with the passed-in python object/
+// Returns the string associated with the passed-in python object.
 char* TFE_GetPyThonString(PyObject* o);

 #endif  // TENSORFLOW_PYTHON_EAGER_PYWRAP_TFE_H_