Workaround for dynamic intermediate tensor in TFLite.

Fixes #26248

PiperOrigin-RevId: 252971098
Yu-Cheng Ling 2019-06-12 23:40:42 -07:00 committed by TensorFlower Gardener
parent 810f2586ef
commit b81b902c37
5 changed files with 75 additions and 3 deletions

View File

@@ -44,6 +44,11 @@ struct AllocationInfo;
// execution. Since dynamic tensors don't have sizes until after the
// corresponding operation is executed, this class supports incremental
// planning.
//
// TODO(b/127354079): Remove the constraint below when the issue is fixed.
// WARNING: MemoryPlanner's behavior must be deterministic. If the first N
// nodes are unchanged, it must produce exactly the same allocation plan for
// the first N nodes.
class ArenaPlanner : public MemoryPlanner {
public:
// Ownership of 'context' is not taken and it must remain until the
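
For context, here is a minimal standalone sketch of the "incremental planning" idea described above: an op with dynamic outputs only determines their sizes when it runs, so the plan is extended a few nodes at a time instead of being computed once up front. ToyArena and ExtendPlan are illustrative names, not part of TFLite, and the toy ignores the lifetime-based memory reuse the real ArenaPlanner performs.

#include <cstdio>
#include <vector>

// Illustrative stand-ins only; not the real ArenaPlanner.
struct ToyArena {
  std::vector<int> offsets;  // planned byte offset of each node's output
  int high_water_mark = 0;   // total arena bytes planned so far
};

// Extends the plan to cover nodes [first, last]. sizes[i] must be known by
// the time node i is planned, which for dynamic tensors is only after the
// producing op has executed.
void ExtendPlan(const std::vector<int>& sizes, int first, int last,
                ToyArena* arena) {
  for (int i = first; i <= last; ++i) {
    arena->offsets.push_back(arena->high_water_mark);
    arena->high_water_mark += sizes[i];
  }
}

int main() {
  ToyArena arena;
  std::vector<int> sizes = {16, 32};  // static shapes, known up front
  ExtendPlan(sizes, 0, 1, &arena);
  sizes.push_back(64);                // node 2's size learned only at runtime
  ExtendPlan(sizes, 2, 2, &arena);
  for (size_t i = 0; i < arena.offsets.size(); ++i) {
    std::printf("node %zu output -> offset %d\n", i, arena.offsets[i]);
  }
  return 0;
}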

View File

@@ -155,6 +155,7 @@ Subgraph::Subgraph(ErrorReporter* error_reporter,
: context_(&owned_context_),
error_reporter_(error_reporter),
next_execution_plan_index_to_prepare_(0),
next_execution_plan_index_to_plan_allocation_(0),
external_contexts_(external_contexts),
subgraphs_(subgraphs) {
context_->impl_ = static_cast<void*>(this);
@@ -477,6 +478,7 @@ TfLiteStatus Subgraph::AllocateTensors() {
}
next_execution_plan_index_to_prepare_ = 0;
next_execution_plan_index_to_plan_allocation_ = 0;
if (memory_planner_) {
TF_LITE_ENSURE_STATUS(memory_planner_->ResetAllocations());
}
@@ -666,10 +668,14 @@ TfLiteStatus Subgraph::PrepareOpsAndTensors() {
TF_LITE_ENSURE_STATUS(PrepareOpsStartingAt(
next_execution_plan_index_to_prepare_, &last_exec_plan_index_prepared));
TF_LITE_ENSURE_STATUS(memory_planner_->ExecuteAllocations(
next_execution_plan_index_to_prepare_, last_exec_plan_index_prepared));
next_execution_plan_index_to_prepare_ = last_exec_plan_index_prepared + 1;
TF_LITE_ENSURE_STATUS(memory_planner_->ExecuteAllocations(
next_execution_plan_index_to_plan_allocation_,
last_exec_plan_index_prepared));
next_execution_plan_index_to_plan_allocation_ =
last_exec_plan_index_prepared + 1;
return kTfLiteOk;
}
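
The hunk above is the heart of the change: preparation and allocation planning now advance separate counters, so the range passed to ExecuteAllocations() can start earlier than the next op to prepare. A minimal sketch of that bookkeeping, under toy types (ToySubgraph below is illustrative, not the real Subgraph or MemoryPlanner):

#include <cstdio>

// Toy model of the two progress counters; the names mirror the Subgraph
// members, but nothing here is real TFLite code.
struct ToySubgraph {
  int next_execution_plan_index_to_prepare = 0;
  int next_execution_plan_index_to_plan_allocation = 0;

  void PrepareOpsAndTensors(int last_exec_plan_index_prepared) {
    std::printf("prepare ops [%d, %d]\n",
                next_execution_plan_index_to_prepare,
                last_exec_plan_index_prepared);
    next_execution_plan_index_to_prepare = last_exec_plan_index_prepared + 1;

    std::printf("plan allocations for ops [%d, %d]\n",
                next_execution_plan_index_to_plan_allocation,
                last_exec_plan_index_prepared);
    next_execution_plan_index_to_plan_allocation =
        last_exec_plan_index_prepared + 1;
  }
};

int main() {
  ToySubgraph g;
  g.PrepareOpsAndTensors(2);  // both ranges are [0, 2]
  // The Invoke() workaround (next hunk) rewinds only the allocation counter,
  // so the next call prepares [3, 4] but re-plans allocations for [0, 4].
  g.next_execution_plan_index_to_plan_allocation = 0;
  g.PrepareOpsAndTensors(4);
  return 0;
}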
@@ -743,6 +749,22 @@ TfLiteStatus Subgraph::Invoke() {
if (tensor_resized_since_op_invoke_ &&
HasDynamicTensor(*context_, node.outputs)) {
next_execution_plan_index_to_prepare_ = execution_plan_index + 1;
// This happens when an intermediate dynamic tensor is resized.
// We don't have to prepare all the ops, but we need to recompute
// the allocation plan.
//
// This is a workaround for b/127354079. It relies on the property that
// ArenaPlanner's behavior is deterministic. A better solution would be
// the ability to "Rewind" the ArenaPlanner to a specific node index.
// TODO(b/127354079): Improve ArenaPlanner and remove this mechanism.
if (next_execution_plan_index_to_plan_allocation_ >
next_execution_plan_index_to_prepare_) {
next_execution_plan_index_to_plan_allocation_ = 0;
if (memory_planner_) {
TF_LITE_ENSURE_STATUS(memory_planner_->ResetAllocations());
}
}
}
}
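
A standalone sketch of the trigger condition in the hunk above (ToyState and OnDynamicOutputResized are illustrative names, not TFLite APIs): when an op resizes one of its dynamic outputs, every later op must be re-prepared, and if allocation planning has already run past that op the plan is thrown away and replayed from node 0, which is only safe because the planner is deterministic for the unchanged prefix.

#include <cstdio>

// Illustrative stand-in for the relevant Subgraph state.
struct ToyState {
  int next_execution_plan_index_to_prepare = 0;
  int next_execution_plan_index_to_plan_allocation = 0;
  bool allocations_reset = false;  // stands in for ResetAllocations()
};

// Mirrors the logic added to Invoke() above.
void OnDynamicOutputResized(int execution_plan_index, ToyState* s) {
  s->next_execution_plan_index_to_prepare = execution_plan_index + 1;
  if (s->next_execution_plan_index_to_plan_allocation >
      s->next_execution_plan_index_to_prepare) {
    // Allocation planning already covered nodes past this point, so the plan
    // is recomputed from node 0 on the next PrepareOpsAndTensors() call.
    s->next_execution_plan_index_to_plan_allocation = 0;
    s->allocations_reset = true;
  }
}

int main() {
  ToyState s;
  s.next_execution_plan_index_to_prepare = 5;          // ops 0..4 prepared
  s.next_execution_plan_index_to_plan_allocation = 5;  // and planned
  OnDynamicOutputResized(/*execution_plan_index=*/2, &s);
  std::printf("prepare from %d, plan allocations from %d, reset=%d\n",
              s.next_execution_plan_index_to_prepare,
              s.next_execution_plan_index_to_plan_allocation,
              s.allocations_reset ? 1 : 0);
  return 0;
}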

View File

@@ -498,6 +498,14 @@ class Subgraph {
// NOTE: this relies on the nodes being in topological (dependency) order.
int next_execution_plan_index_to_prepare_;
// This is similar to `next_execution_plan_index_to_prepare_`, but it tracks
// which nodes' allocations have been planned by the arena planner.
//
// This is a workaround for b/127354079. It shouldn't be necessary if
// ArenaPlanner can "rewind" to a specific point.
// TODO(b/127354079): Improve ArenaPlanner and remove this mechanism.
int next_execution_plan_index_to_plan_allocation_;
// WARNING: This is an experimental interface that is subject to change.
// This is a list of node indices (to index into nodes_and_registration).
// This represents a valid topological sort (dependency ordered) execution

View File

@@ -21,6 +21,11 @@ namespace tflite {
// A MemoryPlanner is responsible for planning and executing a number of
// memory-related operations that are necessary in TF Lite.
//
// TODO(b/127354079): Remove the constraint below when the issue is fixed.
// WARNING: MemoryPlanner's behavior must be deterministic. If the first N
// nodes are unchanged, it must produce exactly the same allocation plan for
// the first N nodes.
class MemoryPlanner {
public:
virtual ~MemoryPlanner() {}
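
To make the determinism requirement concrete, here is a toy check (PlanFirstN is illustrative, not the MemoryPlanner API): replaying the plan for an unchanged prefix of nodes after a reset must produce exactly the same allocations, which is what allows the Invoke() workaround to re-plan from node 0 for ops that have already executed.

#include <cassert>
#include <cstdio>
#include <vector>

// Toy "planner": assigns consecutive offsets to the first n tensor sizes.
// Purely illustrative; the real planner is far more involved, but it must be
// just as deterministic.
std::vector<int> PlanFirstN(const std::vector<int>& sizes, int n) {
  std::vector<int> offsets;
  int next_free = 0;
  for (int i = 0; i < n; ++i) {
    offsets.push_back(next_free);
    next_free += sizes[i];
  }
  return offsets;
}

int main() {
  const std::vector<int> sizes = {8, 24, 16, 40};
  const std::vector<int> first_plan = PlanFirstN(sizes, 3);
  // "ResetAllocations()" followed by a replay over the same first 3 nodes:
  const std::vector<int> replayed_plan = PlanFirstN(sizes, 3);
  assert(first_plan == replayed_plan);
  std::printf("plans for the unchanged first 3 nodes are identical\n");
  return 0;
}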

View File

@@ -819,6 +819,38 @@ class FromSessionTest(test_util.TensorFlowTestCase):
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

  def testResizingIntermediateDynamicTensor(self):
    # This is a regression test for the case where the shape of dynamic output
    # tensors changes between invocations.
    # See also https://github.com/tensorflow/tensorflow/issues/26549
    input_tensor = array_ops.placeholder(shape=[1, 1], dtype=dtypes.float32)
    input2_tensor = array_ops.placeholder(shape=[1], dtype=dtypes.float32)

    # The bug is triggered only when the dynamic tensor is intermediate, so
    # put some other ops around it.
    neg = math_ops.negative(input2_tensor)
    padding = array_ops.placeholder(shape=[2, 2], dtype=dtypes.int32)
    output_tensor = array_ops.pad(input_tensor, padding) + neg

    sess = session.Session()
    converter = lite.TFLiteConverter.from_session(
        sess, [input_tensor, padding, input2_tensor], [output_tensor])
    tflite_model = converter.convert()

    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    interpreter.set_tensor(input_details[1]['index'],
                           np.array([[1, 1], [1, 1]], dtype=np.int32))
    interpreter.invoke()

    # Without the fix, invocation will fail when changing the shape of
    # intermediate dynamic tensors.
    interpreter.set_tensor(input_details[1]['index'],
                           np.array([[2, 2], [2, 2]], dtype=np.int32))
    interpreter.invoke()

@test_util.run_v1_only('Incompatible with 2.0.')
class FromFrozenGraphFile(test_util.TensorFlowTestCase):