Merge pull request #26652 from amitsrivastava78:error_5
PiperOrigin-RevId: 238305243
commit 7edbcf25c1
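The diff below mechanically replaces calls to `Tensor.get_shape()` with the `Tensor.shape` property across the TF Lite converter/tooling code, the audio microfrontend op, and `dynamic_rnn`. In TensorFlow, `get_shape()` is an alias of the `shape` property, so the substitution is behavior-preserving. A minimal sketch of the equivalence (the tensor `x` is only an illustration and is not part of this change):

    import tensorflow as tf

    # A throwaway 2x2 tensor, used only to illustrate the equivalence.
    x = tf.constant([[1.0, 2.0], [3.0, 4.0]])

    # Tensor.get_shape() and the Tensor.shape property return the same
    # TensorShape, so swapping one for the other does not change behavior.
    assert x.get_shape() == x.shape
    print(x.shape.as_list())  # [2, 2]
    print(x.shape.rank)       # 2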
@@ -217,10 +217,10 @@ def dynamic_rnn(cell,
   parallel_iterations = parallel_iterations or 32
   if sequence_length is not None:
     sequence_length = math_ops.cast(sequence_length, dtypes.int32)
-    if sequence_length.get_shape().rank not in (None, 1):
+    if sequence_length.shape.rank not in (None, 1):
       raise ValueError(
           "sequence_length must be a vector of length batch_size, "
-          "but saw shape: %s" % sequence_length.get_shape())
+          "but saw shape: %s" % sequence_length.shape)
     sequence_length = array_ops.identity(  # Just to find it in the graph.
         sequence_length,
         name="sequence_length")
@@ -436,9 +436,9 @@ class TFLiteLSTMCell(rnn_cell_impl.LayerRNNCell):
         aggregate="first",
         index_override=18)

-    input_size = inputs.get_shape().with_rank(2)[1]
+    input_size = inputs.shape.with_rank(2)[1]
     if input_size.value is None:
-      raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
+      raise ValueError("Could not infer input size from inputs.shape[-1]")

     inputs_and_m_prev = array_ops.concat([inputs, m_prev], axis=1)

@@ -96,7 +96,7 @@ def audio_microfrontend(audio,
   Raises:
     ValueError: If the audio tensor is not explicitly a vector.
   """
-  audio_shape = audio.get_shape()
+  audio_shape = audio.shape
   if audio_shape.ndims is None:
     raise ValueError("Input to `AudioMicrofrontend` should have known rank.")
   if len(audio_shape) > 1:
@@ -244,7 +244,7 @@ def build_toco_convert_protos(input_tensors,

   Args:
     input_tensors: List of input tensors. Type and shape are computed using
-      `foo.get_shape()` and `foo.dtype`.
+      `foo.shape` and `foo.dtype`.
     output_tensors: List of output tensors (only .name is used from this).
     inference_type: Target data type of real-number arrays in the output file.
       Must be `{tf.float32, tf.uint8}`. (default tf.float32)
@@ -347,7 +347,7 @@ def build_toco_convert_protos(input_tensors,
           "inference_input_type is QUANTIZED_UINT8.")
       input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
     if input_shapes is None:
-      shape = input_tensor.get_shape()
+      shape = input_tensor.shape
     else:
       shape = input_shapes[idx]
     input_array.shape.dims.extend(map(int, shape))
@@ -423,7 +423,7 @@ def toco_convert_impl(input_data, input_tensors, output_tensors, *args,
   Args:
     input_data: Input data (i.e. often `sess.graph_def`),
     input_tensors: List of input tensors. Type and shape are computed using
-      `foo.get_shape()` and `foo.dtype`.
+      `foo.shape` and `foo.dtype`.
     output_tensors: List of output tensors (only .name is used from this).
     *args: See `build_toco_convert_protos`,
     **kwargs: See `build_toco_convert_protos`.
@@ -456,7 +456,7 @@ def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
   Args:
     input_data: Input data (i.e. often `sess.graph_def`),
     input_tensors: List of input tensors. Type and shape are computed using
-      `foo.get_shape()` and `foo.dtype`.
+      `foo.shape` and `foo.dtype`.
     output_tensors: List of output tensors (only .name is used from this).
     *args: See `build_toco_convert_protos`,
     **kwargs: See `build_toco_convert_protos`.
@@ -215,8 +215,7 @@ def set_tensor_shapes(tensors, shapes):
         tensor.set_shape(shape)
       except ValueError as error:
         message = ("The shape of tensor '{0}' cannot be changed from {1} to "
-                   "{2}. {3}".format(name, tensor.get_shape(), shape,
-                                     str(error)))
+                   "{2}. {3}".format(name, tensor.shape, shape, str(error)))
         raise ValueError(message)


@@ -258,14 +258,14 @@ class TFLiteConverterV2(object):
     # Checks dimensions in input tensor.
     for tensor in input_tensors:
       # Note that shape_list might be empty for scalar shapes.
-      shape_list = tensor.get_shape().as_list()
+      shape_list = tensor.shape.as_list()
       if None in shape_list[1:]:
         raise ValueError(
             "None is only supported in the 1st dimension. Tensor '{0}' has "
             "invalid shape '{1}'.".format(_tensor_name(tensor), shape_list))
       elif shape_list and shape_list[0] is None:
         # Set the batch size to 1 if undefined.
-        shape = tensor.get_shape().as_list()
+        shape = tensor.shape.as_list()
         shape[0] = 1
         tensor.set_shape(shape)

@@ -411,7 +411,7 @@ class TFLiteConverter(object):
     Args:
       graph_def: Frozen TensorFlow GraphDef.
       input_tensors: List of input tensors. Type and shape are computed using
-        `foo.get_shape()` and `foo.dtype`.
+        `foo.shape` and `foo.dtype`.
       output_tensors: List of output tensors (only .name is used from this).
       input_arrays_with_shape: Tuple of strings representing input tensor names
         and list of integers representing input shapes
@@ -460,7 +460,7 @@ class TFLiteConverter(object):
     Args:
       sess: TensorFlow Session.
       input_tensors: List of input tensors. Type and shape are computed using
-        `foo.get_shape()` and `foo.dtype`.
+        `foo.shape` and `foo.dtype`.
       output_tensors: List of output tensors (only .name is used from this).

     Returns:
@@ -678,7 +678,7 @@ class TFLiteConverter(object):
     # Checks dimensions in input tensor.
     if self._has_valid_tensors():
       for tensor in self._input_tensors:
-        shape = tensor.get_shape()
+        shape = tensor.shape
         if not shape:
           raise ValueError("Provide an input shape for input array "
                            "'{0}'.".format(_tensor_name(tensor)))
@@ -808,7 +808,7 @@ class TFLiteConverter(object):
                          "use input_shapes parameter.")

       for tensor in self._input_tensors:
-        shape = tensor.get_shape().as_list()
+        shape = tensor.shape.as_list()
         shape[0] = batch_size
         tensor.set_shape(shape)

@@ -492,9 +492,8 @@ def make_zip_of_tests(zip_path,
         report["toco"] = report_lib.FAILED
         report["tf"] = report_lib.SUCCESS
         # Convert graph to toco
-        input_tensors = [(input_tensor.name.split(":")[0],
-                          input_tensor.get_shape(), input_tensor.dtype)
-                         for input_tensor in inputs]
+        input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape,
+                          input_tensor.dtype) for input_tensor in inputs]
        output_tensors = [normalize_output_name(out.name) for out in outputs]
        graph_def = freeze_graph(
            sess,
@@ -54,7 +54,7 @@ class TocoFromProtosTest(googletest.TestCase):
     model_flags = model_flags_pb2.ModelFlags()
     input_array = model_flags.input_arrays.add()
     input_array.name = TensorName(in_tensor)
-    input_array.shape.dims.extend(map(int, in_tensor.get_shape()))
+    input_array.shape.dims.extend(map(int, in_tensor.shape))
     model_flags.output_arrays.append(TensorName(out_tensor))
     # Shell out to run toco (in case it crashes)
     with tempfile.NamedTemporaryFile() as fp_toco, \