diff --git a/tensorflow/compiler/jit/mark_for_compilation_pass.cc b/tensorflow/compiler/jit/mark_for_compilation_pass.cc index 80b3b596763..6da18135233 100644 --- a/tensorflow/compiler/jit/mark_for_compilation_pass.cc +++ b/tensorflow/compiler/jit/mark_for_compilation_pass.cc @@ -507,7 +507,7 @@ Status FindCompilationCandidates( XlaOpRegistry::AutoclusteringPolicy::kAlways; OperationFilter op_filter; - op_filter.allow_resource_ops = registration->compile_resource_ops; + op_filter.allow_resource_ops = registration->compile_all_resource_ops; op_filter.allow_stateful_rng_ops = always_auto_cluster; op_filter.allow_control_trigger = always_auto_cluster; op_filter.allow_dummy_ops = always_auto_cluster; @@ -542,7 +542,7 @@ Status FindCompilationCandidates( continue; } - if (!op_filter.allow_resource_ops && + if (!registration->compile_all_resource_ops && (HasResourceOutput(*node) || IsNonResourceVarResourceOp(*node))) { // We don't have a way of returning values of type DT_RESOURCE from XLA // computations so we avoid auto-clustering nodes producing DT_RESOURCE. @@ -608,8 +608,8 @@ Status FindCompilationCandidates( } // We don't auto-cluster functional control flow nodes containing resource // operations because safety checks are trickier in this case. - // registration->compile_resource_ops is true for XLA_CPU/XLA_GPU but not - // for CPU/GPU. + // registration->compile_all_resource_ops is true for XLA_CPU/XLA_GPU but + // not for CPU/GPU. 
if (node->type_string() == "While" && !IsCompilableWhile(*node, jit_device_type, op_filter, 0, lib_runtime)) { continue; } @@ -936,7 +936,7 @@ static Status IgnoreResourceOpForSafetyAnalysis(const Node& n, bool* ignore) { if (!XlaOpRegistry::GetCompilationDevice(device_type.type(), &registration)) { *ignore = true; } else { - *ignore = registration->compile_resource_ops; + *ignore = registration->compile_all_resource_ops; } return Status::OK(); } diff --git a/tensorflow/compiler/jit/xla_cpu_device.cc b/tensorflow/compiler/jit/xla_cpu_device.cc index 345e87a5735..f6e73ab7fec 100644 --- a/tensorflow/compiler/jit/xla_cpu_device.cc +++ b/tensorflow/compiler/jit/xla_cpu_device.cc @@ -46,7 +46,7 @@ Status XlaCpuDeviceFactory::CreateDevices( compile_on_demand ? XlaOpRegistry::AutoclusteringPolicy::kIfExplicitlyRequested : XlaOpRegistry::AutoclusteringPolicy::kAlways; - registration.compile_resource_ops = true; + registration.compile_all_resource_ops = true; XlaOpRegistry::RegisterCompilationDevice(DEVICE_XLA_CPU, registration); static XlaDeviceOpRegistrations* registrations = diff --git a/tensorflow/compiler/jit/xla_gpu_device.cc b/tensorflow/compiler/jit/xla_gpu_device.cc index b29f6a009b9..b37926073ac 100644 --- a/tensorflow/compiler/jit/xla_gpu_device.cc +++ b/tensorflow/compiler/jit/xla_gpu_device.cc @@ -66,7 +66,7 @@ Status XlaGpuDeviceFactory::CreateDevices( registration.compilation_device_name = DEVICE_GPU_XLA_JIT; registration.autoclustering_policy = XlaOpRegistry::AutoclusteringPolicy::kAlways; - registration.compile_resource_ops = true; + registration.compile_all_resource_ops = true; XlaOpRegistry::RegisterCompilationDevice(DEVICE_XLA_GPU, registration); static XlaDeviceOpRegistrations* registrations = diff --git a/tensorflow/compiler/jit/xla_interpreter_device.cc b/tensorflow/compiler/jit/xla_interpreter_device.cc index e1a58240615..15f5ddbd7ba 100644 --- a/tensorflow/compiler/jit/xla_interpreter_device.cc +++ b/tensorflow/compiler/jit/xla_interpreter_device.cc @@ 
-47,7 +47,7 @@ Status XlaInterpreterDeviceFactory::CreateDevices( registration.compilation_device_name = DEVICE_INTERPRETER_XLA_JIT; registration.autoclustering_policy = XlaOpRegistry::AutoclusteringPolicy::kAlways; - registration.compile_resource_ops = true; + registration.compile_all_resource_ops = true; XlaOpRegistry::RegisterCompilationDevice(DEVICE_XLA_INTERPRETER, registration); diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.cc b/tensorflow/compiler/tf2xla/xla_op_registry.cc index 9470c7e334c..1106c027c03 100644 --- a/tensorflow/compiler/tf2xla/xla_op_registry.cc +++ b/tensorflow/compiler/tf2xla/xla_op_registry.cc @@ -148,7 +148,7 @@ XlaOpRegistry::~XlaOpRegistry() = default; cpu_global_jit ? XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally : XlaOpRegistry::AutoclusteringPolicy::kIfExplicitlyRequested; - registration.compile_resource_ops = false; + registration.compile_all_resource_ops = false; } if (LaunchOpHasKernelForDevice(DeviceType(DEVICE_GPU)).ok()) { DeviceRegistration& registration = @@ -156,7 +156,7 @@ XlaOpRegistry::~XlaOpRegistry() = default; registration.compilation_device_name = DEVICE_GPU_XLA_JIT; registration.autoclustering_policy = XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally; - registration.compile_resource_ops = false; + registration.compile_all_resource_ops = false; } return nullptr; }(); diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.h b/tensorflow/compiler/tf2xla/xla_op_registry.h index 80d022b592c..bf4d2e1a9dd 100644 --- a/tensorflow/compiler/tf2xla/xla_op_registry.h +++ b/tensorflow/compiler/tf2xla/xla_op_registry.h @@ -89,7 +89,7 @@ class XlaOpRegistry { AutoclusteringPolicy autoclustering_policy; // Enable compilation of operators that use DT_RESOURCE types? - bool compile_resource_ops = false; + bool compile_all_resource_ops = false; }; // Registers an XLA backend. `compilation_device_name` is the name of the