Add LSTM to op spec. This CL only adds the intermediates fields for one variant of LSTM.
PiperOrigin-RevId: 278703956 Change-Id: I4468e546d4a1096b7345d4e96701fec7d4f8ec59
This commit is contained in:
parent
78f7b4165e
commit
60b7d06456
@ -165,6 +165,15 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
|
|||||||
property.version = 2;
|
property.version = 2;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
case BuiltinOperator_LSTM: {
|
||||||
|
// TODO(jianlijianli): extend LSTM op spec to include input, bias etc.
|
||||||
|
// TODO(jianlijianli): extend this to other variants of LSTM.
|
||||||
|
// LSTM needs 5 intermediate tensors. This agrees with the fully quantized
|
||||||
|
// kernels in lstm_eval.cc
|
||||||
|
property.intermediates = {{0, {}}, {1, {}}, {2, {}}, {3, {}}, {4, {}}};
|
||||||
|
property.version = 2;
|
||||||
|
break;
|
||||||
|
}
|
||||||
case BuiltinOperator_L2_NORMALIZATION: {
|
case BuiltinOperator_L2_NORMALIZATION: {
|
||||||
property.inputs = {{0, {}}};
|
property.inputs = {{0, {}}};
|
||||||
// L2 Norm requires output with 1/128 as scale and 0 as zero point.
|
// L2 Norm requires output with 1/128 as scale and 0 as zero point.
|
||||||
|
Loading…
x
Reference in New Issue
Block a user