Internal change
PiperOrigin-RevId: 306166861
Change-Id: I24f36a98ff77d578f58aa56f62a96cce1da90e7b
parent 716bcea5a2
commit cd153446ee

tensorflow/python/debug/lib/check_numerics_callback_test.py
@@ -94,16 +94,10 @@ class CheckNumericsCallbackTest(test_util.TensorFlowTestCase):
 
     dataset = dataset_ops.Dataset.from_tensor_slices(tensor).batch(2).map(
         map_fn)
-
-    @def_function.function
-    def get_batches():
-      iterator = iter(dataset)
-      return [next(iterator), next(iterator)]
-
-    batches = self.evaluate(get_batches())
-    self.assertLen(batches, 2)
-    self.assertAllClose(batches[0], np.log([1.25, 2]))
-    self.assertAllClose(batches[1], np.log([3.25, 5]))
+    iterator = dataset_ops.make_one_shot_iterator(dataset)
+
+    self.assertAllClose(self.evaluate(iterator.get_next()), np.log([1.25, 2]))
+    self.assertAllClose(self.evaluate(iterator.get_next()), np.log([3.25, 5]))
 
 
 class CheckNumericsCallbackUnhealthyTest(test_util.TensorFlowTestCase):

@@ -273,23 +267,6 @@ class CheckNumericsCallbackUnhealthyTest(test_util.TensorFlowTestCase):
     self.assertTrue(re.search(r"Stack trace of op's creation", message))
     self.assertIn("accum.assign(accum * 2.0)", message)
 
-  @test_util.run_in_graph_and_eager_modes
-  def testNanInConstIsCaptured(self):
-    check_numerics_callback.enable_check_numerics()
-    v = variables.Variable(3.0, dtype=dtypes.float32)
-    @def_function.function
-    def add_a_bad_constant(x):
-      c = constant_op.constant(np.nan)
-      return x + c
-    if not context.executing_eagerly():
-      self.evaluate(v.initializer)
-    message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
-        lambda: self.evaluate(add_a_bad_constant(v)))
-    self.assertTrue(re.search(r"graph op.*\"Const\"", message))
-    self.assertTrue(re.search(r"dtype:.*float32", message))
-    self.assertTrue(re.search(r"shape:.*\(\)", message))
-    self.assertTrue(re.search(r"Graph name:.*add_a_bad_constant", message))
-
   @test_util.run_in_graph_and_eager_modes
   def testCatchInfinityInDatasetMapFunction(self):
     """Test that callback catches NaN in a tf.dataset map function."""

tensorflow/python/debug/lib/debug_events_monitors_test.py
@@ -173,8 +173,7 @@ class DebugEventsMonitorTest(dumping_callback_test_lib.DumpingCallbackTestBase,
       self.assertLen(traces[1].debug_tensor_value, 11)
       self.assertLen(traces[2].debug_tensor_value, 11)
     elif tensor_debug_mode == "FULL_TENSOR":
-      # [Placeholder:0, Unique:0, Unique:1, Const:0, Sum:0].
-      self.assertLen(traces, 5)
+      self.assertLen(traces, 4)  # [Placeholder:0, Unique:0, Unique:1, Sum:0].
       self.assertEqual(traces[0].op_type, "Placeholder")
       self.assertEqual(traces[0].output_slot, 0)
       self.assertIsNone(traces[0].debug_tensor_value)

@@ -193,16 +192,11 @@ class DebugEventsMonitorTest(dumping_callback_test_lib.DumpingCallbackTestBase,
       self.assertAllEqual(
           reader.graph_execution_trace_to_tensor_value(traces[2]),
           [0, 1, 2, 3, 0])
-      self.assertEqual(traces[3].op_type, "Const")
+      self.assertEqual(traces[3].op_type, "Sum")
       self.assertEqual(traces[3].output_slot, 0)
       self.assertIsNone(traces[3].debug_tensor_value)
       self.assertAllClose(
-          reader.graph_execution_trace_to_tensor_value(traces[3]), [0])
-      self.assertEqual(traces[4].op_type, "Sum")
-      self.assertEqual(traces[4].output_slot, 0)
-      self.assertIsNone(traces[4].debug_tensor_value)
-      self.assertAllClose(
-          reader.graph_execution_trace_to_tensor_value(traces[4]), 17.)
+          reader.graph_execution_trace_to_tensor_value(traces[3]), 17.)
 
 
 class AlertDataObjectsTest(test_util.TensorFlowTestCase):

tensorflow/python/debug/lib/dumping_callback.py
@@ -292,12 +292,7 @@ class _DumpingCallback(object):
     # TODO(cais): Evaluate performance optimization options. For the
     # `NO_TENSOR` debug mode, an alternative is to add `debug_tensor` as a
    # control dependency of `tensor.op` without an additional identity op.
-    if (tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR and
-        op_type != "Const"):
-      # NOTE(b/153716279): Under v1 graph mode, overriding the output tensor
-      # of Const ops can lead to downstream errors related to shapes. We opt
-      # to use an identity op to avoid this issue at the cost of slightly
-      # larger graph size.
+    if tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR:
       return debug_tensor
     else:
       identity = array_ops.identity(tensor)
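
For orientation: the surviving code path wires the debug op in via an extra identity op, so downstream consumers read the identity's output while a control dependency guarantees the debug op actually runs. Below is a minimal, hypothetical sketch of that pattern; the helper name `_route_through_identity` is illustrative and not part of this diff.

    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops

    def _route_through_identity(tensor, debug_tensor):
      # Return an identity of `tensor` that cannot execute until
      # `debug_tensor` has been computed; the traced value itself
      # passes through unchanged.
      with ops.control_dependencies([debug_tensor]):
        return array_ops.identity(tensor)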

@@ -535,8 +530,8 @@ class _DumpingCallback(object):
     is_v1_graph_mode = not ops.executing_eagerly_outside_functions()
     context_id = self._get_context_id(graph)  # Innermost context ID.
     output_tensor_ids = self._get_symbolic_tensor_ids(len(outputs))
-    if op_type in ("Const", "Placeholder", "PlaceholderWithDefault"):
-      # In some cases, the op name of a Const or Placeholder op in a graph
+    if op_type in ("Placeholder", "PlaceholderWithDefault"):
+      # In some cases, the op name of a Placeholder op in a graph
       # can be duplicate (e.g., with the name "resource").
       # When this happens, we give the op a debugger-generated name
       # in order to prevent problems and check failures down the pipe.

tensorflow/python/debug/lib/dumping_callback_test.py
@@ -289,8 +289,7 @@ class DumpingCallbackTest(
     with debug_events_reader.DebugDataReader(self.dump_root) as reader:
       reader.update()
       graph_exec_traces = reader.graph_execution_traces()
-      executed_op_types = [trace.op_type for trace in graph_exec_traces
-                           if trace.op_type != "Const"]
+      executed_op_types = [trace.op_type for trace in graph_exec_traces]
       self.assertCountEqual(
           executed_op_types,
           ["Placeholder", "Placeholder", "AddV2", "Sub", "RealDiv"])

@@ -345,46 +344,6 @@ class DumpingCallbackTest(
       self.assertAllClose(trace.debug_tensor_value,
                           [tensor_id, 19, 1, 8, 8, 0, 0, 0, 0, 0])
 
-  @parameterized.named_parameters(
-      ("CurtHealth", "CURT_HEALTH"),
-      ("FullTensor", "FULL_TENSOR"),
-  )
-  @test_util.run_in_graph_and_eager_modes
-  def testConstTensorsAreCaptured(self, tensor_debug_mode):
-    writer = dumping_callback.enable_dump_debug_info(
-        self.dump_root, tensor_debug_mode=tensor_debug_mode)
-    @def_function.function
-    def times_two_plus_three(x):
-      return x * constant_op.constant(2.0) + constant_op.constant(3.0)
-    self.assertAllEqual(
-        self.evaluate(times_two_plus_three(10.0)), 23.0)
-    writer.FlushNonExecutionFiles()
-    writer.FlushExecutionFiles()
-
-    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
-      reader.update()
-      const_traces = [trace for trace in reader.graph_execution_traces()
-                      if trace.op_type == "Const"]
-      self.assertGreaterEqual(len(const_traces), 3)
-      if tensor_debug_mode == "CURT_HEALTH":
-        # Under CURT_HEALTH, each debug tensor value has the form
-        # [tensor_id, has_inf_or_nan].
-        self.assertLen(const_traces[0].debug_tensor_value, 2)
-        self.assertEqual(const_traces[0].debug_tensor_value[1], 0)
-        self.assertLen(const_traces[1].debug_tensor_value, 2)
-        self.assertEqual(const_traces[1].debug_tensor_value[1], 0)
-        self.assertLen(const_traces[2].debug_tensor_value, 2)
-        self.assertEqual(const_traces[2].debug_tensor_value[1], 0)
-      else:  # FULL_TENSOR.
-        const_tensor_values = [
-            reader.graph_execution_trace_to_tensor_value(const_trace)
-            for const_trace in const_traces]
-        # Avoid making assertion on the particular order of the debug tensors
-        # for the three Consts because it may be indeterminate.
-        self.assertIn(10.0, const_tensor_values)
-        self.assertIn(2.0, const_tensor_values)
-        self.assertIn(3.0, const_tensor_values)
-
   @parameterized.named_parameters(
       ("Shape", "SHAPE"),
   )

@@ -408,8 +367,7 @@ class DumpingCallbackTest(
     with debug_events_reader.DebugDataReader(self.dump_root) as reader:
       reader.update()
       graph_exec_traces = reader.graph_execution_traces()
-      executed_op_types = [trace.op_type for trace in graph_exec_traces
-                           if trace.op_type != "Const"]
+      executed_op_types = [trace.op_type for trace in graph_exec_traces]
       self.assertEqual(
           executed_op_types,
           ["Placeholder", "Placeholder", "LogicalAnd", "LogicalNot"])

@@ -531,8 +489,7 @@ class DumpingCallbackTest(
       _, stack_frames = reader.read_graph_op_creation_stack_trace(op_digest)
       self._verifyStackFrames(stack_frames)
 
-      graph_exec_traces = [trace for trace in reader.graph_execution_traces()
-                           if trace.op_type != "Const"]
+      graph_exec_traces = reader.graph_execution_traces()
       executed_op_types = [digest.op_type for digest in graph_exec_traces]
       self.assertEqual(
           executed_op_types,

@@ -945,10 +902,10 @@ class DumpingCallbackTest(
       reader.update()
       graph_exec_digests = reader.graph_execution_traces(digest=True)
       executed_op_types = [digest.op_type for digest in graph_exec_digests
-                           if digest.op_type not in ("Const", "Placeholder")]
+                           if digest.op_type != "Placeholder"]
       tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                        for digest in graph_exec_digests
-                       if digest.op_type not in ("Const", "Placeholder")]
+                       if digest.op_type != "Placeholder"]
 
       if tensor_dtypes == [dtypes.float32] and not op_regex:
         self.assertEqual(executed_op_types, ["Unique", "Sum"])

@@ -1046,8 +1003,7 @@ class DumpingCallbackTest(
       self.assertAllClose(tensor_values, [8.0])
 
       graph_exec_traces = reader.graph_execution_traces()
-      executed_op_types = [trace.op_type for trace in graph_exec_traces
-                           if trace.op_type != "Const"]
+      executed_op_types = [trace.op_type for trace in graph_exec_traces]
       if tensor_debug_mode != "CURT_HEALTH":
         # Less outputs a boolean tensor, which is not tracked under CURT_HEALTH.
         # The Less op should have been executed 5 times.

tensorflow/python/framework/constant_op.py
@@ -28,7 +28,6 @@ from tensorflow.core.framework import types_pb2
 from tensorflow.python.eager import context
 from tensorflow.python.eager import execute
 from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import op_callbacks
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import tensor_shape
 from tensorflow.python.framework import tensor_util

@@ -300,17 +299,11 @@ def _constant_impl(
           value, dtype=dtype, shape=shape, verify_shape=verify_shape,
           allow_broadcast=allow_broadcast))
   dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
-  attrs = {"value": tensor_value, "dtype": dtype_value}
   const_tensor = g._create_op_internal(  # pylint: disable=protected-access
-      "Const", [], [dtype_value.type], attrs=attrs, name=name).outputs[0]
-
-  if op_callbacks.should_invoke_op_callbacks():
-    # TODO(b/147670703): Once the special-op creation code paths
-    # are unified. Remove this `if` block.
-    callback_outputs = op_callbacks.invoke_op_callbacks(
-        "Const", tuple(), attrs, (const_tensor,), op_name=name, graph=g)
-    if callback_outputs is not None:
-      const_tensor, = callback_outputs
+      "Const", [], [dtype_value.type],
+      attrs={"value": tensor_value,
+             "dtype": dtype_value},
+      name=name).outputs[0]
   return const_tensor

tensorflow/python/framework/op_callbacks_test.py
@@ -110,8 +110,7 @@ class _NumpyFunctionCallback(object):
       if compat.as_bytes(op_type) in (_ENTER_OP, _EXIT_OP, _IF_OP, _MERGE_OP,
                                       _NEXT_ITERATION_OP, _STATELESS_IF_OP,
                                       _SWITCH_OP, _WHILE_OP, _IDENTITY_OP,
-                                      _VAR_HANDLE_OP, _PLACEHOLDER_OP,
-                                      _CONSTANT_OP):
+                                      _VAR_HANDLE_OP, _PLACEHOLDER_OP):
         # TODO(cais): Overriding the output of StatelessIf, If and While ops
         # currently fails with error. Investigate (b/139668453).
         # Avoid instrumenting Identity ops as well, as they are inserted

@@ -745,7 +744,7 @@ class OpCallbacksTest(test_util.TensorFlowTestCase):
   def testOverrideDTypeInFuncGraph(self):
     def to_float64(op_type, inputs, attrs, outputs, op_name=None, graph=None):
       del inputs, attrs, op_name, graph  # Unused.
-      if op_type in ("Const", "Placeholder"):
+      if op_type == "Placeholder":
         return outputs
       else:
         return [math_ops.cast(output, dtypes.float64) for output in outputs]
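
For context, `to_float64` above follows the op-callback signature `(op_type, inputs, attrs, outputs, op_name=None, graph=None)`: returning `outputs` leaves the op untouched, while returning replacement tensors overrides the op's outputs. A minimal sketch of registering and unregistering such a callback; the `log_op_types` helper is illustrative, not part of this diff.

    from tensorflow.python.framework import op_callbacks

    def log_op_types(op_type, inputs, attrs, outputs, op_name=None, graph=None):
      del inputs, attrs, op_name, graph  # Unused.
      print("op:", op_type)
      return outputs  # No override: keep the original outputs.

    op_callbacks.add_op_callback(log_op_types)
    try:
      pass  # Build or run TensorFlow code here; the callback fires per op.
    finally:
      op_callbacks.remove_op_callback(log_op_types)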

@@ -772,17 +771,6 @@ class OpCallbacksTest(test_util.TensorFlowTestCase):
     self.assertIsNone(w)
     self.assertEqual(instrument.eager_op_types, [_ADD_OP])
 
-  def testOpCallbackCapturesConstTensors(self):
-    instrument = _NumpyFunctionCallback()
-    op_callbacks.add_op_callback(instrument.callback)
-
-    @def_function.function
-    def times_two_plus_three(x):
-      return x * 2.0 + 3.0
-
-    self.assertAllClose(times_two_plus_three(constant_op.constant(10.0)), 23.0)
-    self.assertEqual(instrument.graph_op_types.count(b"Const"), 2)
-
   @test_util.run_in_graph_and_eager_modes
   def testOpCallbackWorksWithGradientTape(self):
     instrument = _NumpyFunctionCallback()

tensorflow/python/framework/tensor_util.py
@@ -791,10 +791,6 @@ def _ConstantValue(tensor, partial):
     return np.not_equal(value1, value2)
   elif tensor.op.type == "StopGradient":
     return constant_value(tensor.op.inputs[0], partial)
-  elif tensor.op.type == "Identity":
-    return constant_value(tensor.op.inputs[0], partial)
-  elif tensor.op.type in ("CheckNumericsV2", "DebugIdentityV2"):
-    return constant_value(tensor.op.inputs[0], partial)
   else:
     return None

tensorflow/python/kernel_tests/confusion_matrix_test.py
@@ -188,7 +188,7 @@ class ConfusionMatrixTest(test.TestCase):
   def testLabelsTooLarge(self):
     labels = np.asarray([1, 1, 0, 3, 5], dtype=np.int32)
     predictions = np.asarray([2, 1, 0, 2, 2], dtype=np.int32)
-    with self.assertRaisesOpError("`labels`.*out of bound"):
+    with self.assertRaisesOpError("`labels`.*x < y"):
       self._testConfMatrix(
           labels=labels, predictions=predictions, num_classes=3, truth=None)

@@ -203,7 +203,7 @@ class ConfusionMatrixTest(test.TestCase):
   def testPredictionsTooLarge(self):
     labels = np.asarray([1, 1, 0, 2, 2], dtype=np.int32)
     predictions = np.asarray([2, 1, 0, 3, 5], dtype=np.int32)
-    with self.assertRaisesOpError("`predictions`.*out of bound"):
+    with self.assertRaisesOpError("`predictions`.*x < y"):
      self._testConfMatrix(
           labels=labels, predictions=predictions, num_classes=3, truth=None)