Fix an important performance regression for LSTM and GRU in tf 2.0
The issue was caused by auto-inlining of the tf.function in eager context, which caused grappler to be unable to perform the swap optimization. PiperOrigin-RevId: 251945251
This commit is contained in:
parent
9f2714e03b
commit
e691be7814
@@ -1189,6 +1189,9 @@ def _generate_defun_backend(unique_api_name, preferred_device, func):
|
|||||||
function_attributes = {
|
function_attributes = {
|
||||||
_DEFUN_API_NAME_ATTRIBUTE: unique_api_name,
|
_DEFUN_API_NAME_ATTRIBUTE: unique_api_name,
|
||||||
_DEFUN_DEVICE_ATTRIBUTE: preferred_device,
|
_DEFUN_DEVICE_ATTRIBUTE: preferred_device,
|
||||||
|
# TODO(b/133178886): The function is auto inlined in eager context, which
|
||||||
|
# make grappler fail to do the optimization. Force it to not inline here.
|
||||||
|
'_noinline': True,
|
||||||
}
|
}
|
||||||
return function.defun_with_attributes(func=func,
|
return function.defun_with_attributes(func=func,
|
||||||
attributes=function_attributes)
|
attributes=function_attributes)
|
||||||
|
Loading…
Reference in New Issue
Block a user