Move uses of tensorflow::MakeTypeIndex() to tensorflow::TypeIndex::Make.

PiperOrigin-RevId: 317920618
Change-Id: I7af52fdf92c77858ffa897f6d5449bfb0213f4e5
This commit is contained in:
A. Unique TensorFlower 2020-06-23 12:36:13 -07:00 committed by TensorFlower Gardener
parent 95428e83f5
commit e8c972652a
16 changed files with 45 additions and 43 deletions

View File

@@ -764,8 +764,8 @@ Tensor GetResourceHandle(const string& var_name, const string& container,
handle.set_device(device_name);
handle.set_container(container);
handle.set_name(var_name);
handle.set_hash_code(MakeTypeIndex<Var>().hash_code());
handle.set_maybe_type_name(MakeTypeIndex<Var>().name());
handle.set_hash_code(TypeIndex::Make<Var>().hash_code());
handle.set_maybe_type_name(TypeIndex::Make<Var>().name());
Tensor tensor(DT_RESOURCE, TensorShape({}));
tensor.scalar<ResourceHandle>()() = handle;
return tensor;

View File

@@ -301,7 +301,7 @@ ResourceHandle MakeResourceHandle(
return MakeResourceHandle(
container.empty() ? ctx->resource_manager()->default_container()
: container,
name, *ctx->device(), MakeTypeIndex<T>(), dtypes_and_shapes);
name, *ctx->device(), TypeIndex::Make<T>(), dtypes_and_shapes);
}
template <typename T>
@@ -311,7 +311,7 @@ ResourceHandle MakeResourceHandle(
return MakeResourceHandle(
container.empty() ? ctx->resource_manager()->default_container()
: container,
name, *ctx->device(), MakeTypeIndex<T>(), dtypes_and_shapes);
name, *ctx->device(), TypeIndex::Make<T>(), dtypes_and_shapes);
}
Status MakeResourceHandleToOutput(OpKernelContext* context, int output_index,
@@ -589,7 +589,7 @@ Status ResourceMgr::Create(const string& container, const string& name,
CheckDeriveFromResourceBase<T>();
CHECK(resource != nullptr);
mutex_lock l(mu_);
return DoCreate(container, MakeTypeIndex<T>(), name, resource);
return DoCreate(container, TypeIndex::Make<T>(), name, resource);
}
template <typename T, bool use_dynamic_cast>
@@ -635,7 +635,7 @@ template <typename T, bool use_dynamic_cast>
Status ResourceMgr::LookupInternal(const string& container, const string& name,
T** resource) const {
ResourceBase* found = nullptr;
Status s = DoLookup(container, MakeTypeIndex<T>(), name, &found);
Status s = DoLookup(container, TypeIndex::Make<T>(), name, &found);
if (s.ok()) {
// It's safe to down cast 'found' to T* since
// typeid(T).hash_code() is part of the map key.
@@ -660,7 +660,7 @@ Status ResourceMgr::LookupOrCreate(const string& container, const string& name,
s = LookupInternal<T, use_dynamic_cast>(container, name, resource);
if (s.ok()) return s;
TF_RETURN_IF_ERROR(creator(resource));
s = DoCreate(container, MakeTypeIndex<T>(), name, *resource);
s = DoCreate(container, TypeIndex::Make<T>(), name, *resource);
if (!s.ok()) {
return errors::Internal("LookupOrCreate failed unexpectedly");
}
@@ -671,7 +671,7 @@ Status ResourceMgr::LookupOrCreate(const string& container, const string& name,
template <typename T>
Status ResourceMgr::Delete(const string& container, const string& name) {
CheckDeriveFromResourceBase<T>();
return DoDelete(container, MakeTypeIndex<T>(), name);
return DoDelete(container, TypeIndex::Make<T>(), name);
}
template <typename T>
@@ -710,7 +710,7 @@ Status ValidateDevice(OpKernelContext* ctx, const ResourceHandle& p);
template <typename T>
Status ValidateDeviceAndType(OpKernelContext* ctx, const ResourceHandle& p) {
TF_RETURN_IF_ERROR(internal::ValidateDevice(ctx, p));
auto type_index = MakeTypeIndex<T>();
auto type_index = TypeIndex::Make<T>();
if (type_index.hash_code() != p.hash_code()) {
return errors::InvalidArgument(
"Trying to access resource using the wrong type. Expected ",
@@ -883,7 +883,7 @@ ResourceHandle ScopedStepContainer::MakeResourceHandle(
mutex_lock ml(mu_);
dirty_ = true;
return tensorflow::MakeResourceHandle(container_, name, device,
MakeTypeIndex<T>(), {});
TypeIndex::Make<T>(), {});
}
template <typename T>

View File

@@ -105,7 +105,7 @@ class ResourceOpKernel : public OpKernel {
if (has_resource_type_) {
OP_REQUIRES_OK(context, MakeResourceHandleToOutput(
context, 0, cinfo_.container(), cinfo_.name(),
MakeTypeIndex<T>()));
TypeIndex::Make<T>()));
} else {
context->set_output_ref(0, &mu_, handle_.AccessTensor(context));
}

View File

@@ -144,7 +144,7 @@ void EncodeVariant(const T& value, string* buf);
// Variant y_type_unknown = serialized_proto_f; // Store serialized Variant.
//
// EXPECT_EQ(x.TypeName(), y_type_unknown.TypeName()); // Looks like Foo.
// EXPECT_EQ(MakeTypeIndex<VariantTensorDataProto>(),
// EXPECT_EQ(TypeIndex::Make<VariantTensorDataProto>(),
// y_type_unknown.TypeId());
//
class Variant {
@@ -227,7 +227,7 @@ class Variant {
// of the original type when a TensorValueDataProto is stored as the
// value. In this case, it returns the TypeIndex of TensorValueDataProto.
TypeIndex TypeId() const {
const TypeIndex VoidTypeIndex = MakeTypeIndex<void>();
const TypeIndex VoidTypeIndex = TypeIndex::Make<void>();
if (is_empty()) {
return VoidTypeIndex;
}
@@ -244,7 +244,7 @@ class Variant {
// otherwise.
template <typename T>
T* get() {
const TypeIndex TTypeIndex = MakeTypeIndex<T>();
const TypeIndex TTypeIndex = TypeIndex::Make<T>();
if (is_empty() || (TTypeIndex != TypeId())) return nullptr;
return std::addressof(static_cast<Variant::Value<T>*>(GetValue())->value);
}
@@ -253,7 +253,7 @@ class Variant {
// otherwise.
template <typename T>
const T* get() const {
const TypeIndex TTypeIndex = MakeTypeIndex<T>();
const TypeIndex TTypeIndex = TypeIndex::Make<T>();
if (is_empty() || (TTypeIndex != TypeId())) return nullptr;
return std::addressof(
static_cast<const Variant::Value<T>*>(GetValue())->value);
@@ -333,7 +333,7 @@ class Variant {
TypeIndex TypeId() const final {
const TypeIndex value_type_index =
MakeTypeIndex<typename std::decay<T>::type>();
TypeIndex::Make<typename std::decay<T>::type>();
return value_type_index;
}

View File

@@ -160,7 +160,7 @@ string TypeNameVariantImpl(
const T& value,
TypeNameResolver<T, false /* has_type_name */, false /* Tensor */,
false /* protobuf */>) {
return port::MaybeAbiDemangle(MakeTypeIndex<T>().name());
return port::MaybeAbiDemangle(TypeIndex::Make<T>().name());
}
template <typename T>

View File

@@ -521,7 +521,7 @@ class UnaryVariantBinaryOpRegistration {
#define INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION(T, direction, \
device_copy_fn) \
INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION_UNIQ_HELPER( \
__COUNTER__, T, direction, MakeTypeIndex<T>(), device_copy_fn)
__COUNTER__, T, direction, TypeIndex::Make<T>(), device_copy_fn)
#define INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION_UNIQ_HELPER( \
ctr, T, direction, type_index, device_copy_fn) \
@@ -542,7 +542,7 @@ class UnaryVariantBinaryOpRegistration {
#define REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION(op, device, T, \
unary_op_function) \
REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION_UNIQ_HELPER( \
__COUNTER__, op, device, T, MakeTypeIndex<T>(), unary_op_function)
__COUNTER__, op, device, T, TypeIndex::Make<T>(), unary_op_function)
#define REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION_UNIQ_HELPER( \
ctr, op, device, T, type_index, unary_op_function) \
@@ -563,7 +563,7 @@ class UnaryVariantBinaryOpRegistration {
#define REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION(op, device, T, \
binary_op_function) \
REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION_UNIQ_HELPER( \
__COUNTER__, op, device, T, MakeTypeIndex<T>(), binary_op_function)
__COUNTER__, op, device, T, TypeIndex::Make<T>(), binary_op_function)
#define REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION_UNIQ_HELPER( \
ctr, op, device, T, type_index, binary_op_function) \

View File

@@ -155,12 +155,12 @@ TEST(VariantOpCopyToGPURegistryTest, TestBasic) {
// No registered copy fn for GPU<->GPU.
EXPECT_EQ(UnaryVariantOpRegistry::Global()->GetDeviceCopyFn(
VariantDeviceCopyDirection::DEVICE_TO_DEVICE,
MakeTypeIndex<VariantValue>()),
TypeIndex::Make<VariantValue>()),
nullptr);
auto* copy_to_gpu_fn = UnaryVariantOpRegistry::Global()->GetDeviceCopyFn(
VariantDeviceCopyDirection::HOST_TO_DEVICE,
MakeTypeIndex<VariantValue>());
TypeIndex::Make<VariantValue>());
EXPECT_NE(copy_to_gpu_fn, nullptr);
VariantValue vv{true /* early_exit */};
@@ -183,7 +183,7 @@ TEST(VariantOpCopyToGPURegistryTest, TestDuplicate) {
UnaryVariantOpRegistry registry;
UnaryVariantOpRegistry::AsyncVariantDeviceCopyFn f;
class FjFjFj {};
const auto kTypeIndex = MakeTypeIndex<FjFjFj>();
const auto kTypeIndex = TypeIndex::Make<FjFjFj>();
registry.RegisterDeviceCopyFn(VariantDeviceCopyDirection::HOST_TO_DEVICE,
kTypeIndex, f);
EXPECT_DEATH(registry.RegisterDeviceCopyFn(
@@ -193,9 +193,10 @@ TEST(VariantOpCopyToGPURegistryTest, TestDuplicate) {
TEST(VariantOpZerosLikeRegistryTest, TestBasicCPU) {
class Blah {};
EXPECT_EQ(UnaryVariantOpRegistry::Global()->GetUnaryOpFn(
ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_CPU, MakeTypeIndex<Blah>()),
nullptr);
EXPECT_EQ(
UnaryVariantOpRegistry::Global()->GetUnaryOpFn(
ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_CPU, TypeIndex::Make<Blah>()),
nullptr);
VariantValue vv_early_exit{true /* early_exit */, 0 /* value */};
Variant v = vv_early_exit;
@@ -218,9 +219,10 @@ TEST(VariantOpZerosLikeRegistryTest, TestBasicCPU) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TEST(VariantOpUnaryOpRegistryTest, TestBasicGPU) {
class Blah {};
EXPECT_EQ(UnaryVariantOpRegistry::Global()->GetUnaryOpFn(
ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_GPU, MakeTypeIndex<Blah>()),
nullptr);
EXPECT_EQ(
UnaryVariantOpRegistry::Global()->GetUnaryOpFn(
ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_GPU, TypeIndex::Make<Blah>()),
nullptr);
VariantValue vv_early_exit{true /* early_exit */, 0 /* value */};
Variant v = vv_early_exit;
@@ -245,7 +247,7 @@ TEST(VariantOpUnaryOpRegistryTest, TestDuplicate) {
UnaryVariantOpRegistry registry;
UnaryVariantOpRegistry::VariantUnaryOpFn f;
class FjFjFj {};
const auto kTypeIndex = MakeTypeIndex<FjFjFj>();
const auto kTypeIndex = TypeIndex::Make<FjFjFj>();
registry.RegisterUnaryOpFn(ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_CPU,
kTypeIndex, f);
@@ -263,7 +265,7 @@ TEST(VariantOpUnaryOpRegistryTest, TestDuplicate) {
TEST(VariantOpAddRegistryTest, TestBasicCPU) {
class Blah {};
EXPECT_EQ(UnaryVariantOpRegistry::Global()->GetBinaryOpFn(
ADD_VARIANT_BINARY_OP, DEVICE_CPU, MakeTypeIndex<Blah>()),
ADD_VARIANT_BINARY_OP, DEVICE_CPU, TypeIndex::Make<Blah>()),
nullptr);
VariantValue vv_early_exit{true /* early_exit */, 3 /* value */};
@@ -290,7 +292,7 @@ TEST(VariantOpAddRegistryTest, TestBasicCPU) {
TEST(VariantOpAddRegistryTest, TestBasicGPU) {
class Blah {};
EXPECT_EQ(UnaryVariantOpRegistry::Global()->GetBinaryOpFn(
ADD_VARIANT_BINARY_OP, DEVICE_GPU, MakeTypeIndex<Blah>()),
ADD_VARIANT_BINARY_OP, DEVICE_GPU, TypeIndex::Make<Blah>()),
nullptr);
VariantValue vv_early_exit{true /* early_exit */, 3 /* value */};
@@ -318,7 +320,7 @@ TEST(VariantOpAddRegistryTest, TestDuplicate) {
UnaryVariantOpRegistry registry;
UnaryVariantOpRegistry::VariantBinaryOpFn f;
class FjFjFj {};
const auto kTypeIndex = MakeTypeIndex<FjFjFj>();
const auto kTypeIndex = TypeIndex::Make<FjFjFj>();
registry.RegisterBinaryOpFn(ADD_VARIANT_BINARY_OP, DEVICE_CPU, kTypeIndex, f);
EXPECT_DEATH(registry.RegisterBinaryOpFn(ADD_VARIANT_BINARY_OP, DEVICE_CPU,

View File

@@ -589,7 +589,7 @@ TEST(VariantTest, TensorListTest) {
serialized.ToProto(&data);
const Variant y_unknown = data;
EXPECT_EQ(y_unknown.TypeName(), "TensorList");
EXPECT_EQ(y_unknown.TypeId(), MakeTypeIndex<VariantTensorDataProto>());
EXPECT_EQ(y_unknown.TypeId(), TypeIndex::Make<VariantTensorDataProto>());
EXPECT_EQ(y_unknown.DebugString(),
strings::StrCat(
"Variant<type: TensorList value: ", data.DebugString(), ">"));

View File

@@ -90,7 +90,7 @@ class ResourceConditionalAccumulatorOp : public ConditionalAccumulatorBaseOp {
h(1) = cinfo_.name();
OP_REQUIRES_OK(ctx, MakeResourceHandleToOutput(
ctx, 0, cinfo_.container(), cinfo_.name(),
MakeTypeIndex<ConditionalAccumulatorBase>()));
TypeIndex::Make<ConditionalAccumulatorBase>()));
}
TF_DISALLOW_COPY_AND_ASSIGN(ResourceConditionalAccumulatorOp);

View File

@@ -35,7 +35,7 @@ Status CreateHandle(OpKernelContext* ctx, T* resource,
TF_RETURN_IF_ERROR(mgr->Create<T>(container_name, unique_name, resource));
*handle = MakeResourceHandle(container_name, unique_name, *ctx->device(),
MakeTypeIndex<T>());
TypeIndex::Make<T>());
return Status::OK();
}

View File

@@ -111,7 +111,7 @@ class ThreadPoolHandleOp : public OpKernel {
}
OP_REQUIRES_OK(ctx, MakeResourceHandleToOutput(
ctx, 0, cinfo_.container(), cinfo_.name(),
MakeTypeIndex<ThreadPoolResource>()));
TypeIndex::Make<ThreadPoolResource>()));
}
private:

View File

@@ -443,7 +443,7 @@ void IteratorHandleOp::Compute(OpKernelContext* context)
}
OP_REQUIRES_OK(context, MakeResourceHandleToOutput(
context, 0, cinfo_.container(), cinfo_.name(),
MakeTypeIndex<IteratorResource>()));
TypeIndex::Make<IteratorResource>()));
}
Status IteratorHandleOp::VerifyResource(IteratorResource* resource) {

View File

@@ -475,7 +475,7 @@ class MultiDeviceIteratorHandleOp : public OpKernel {
}
OP_REQUIRES_OK(context, MakeResourceHandleToOutput(
context, 0, container_name, unique_name,
MakeTypeIndex<MultiDeviceIterator>()));
TypeIndex::Make<MultiDeviceIterator>()));
}
private:

View File

@@ -126,7 +126,7 @@ class OpsTestBase : public ::testing::Test {
std::string container_name =
container.empty() ? rm->default_container() : container;
EXPECT_TRUE(rm->Create(container_name, name, resource).ok());
AddResourceInputInternal(container_name, name, MakeTypeIndex<T>());
AddResourceInputInternal(container_name, name, TypeIndex::Make<T>());
}
// Runs an operation producing 'num_outputs' outputs.

View File

@@ -554,7 +554,7 @@ inline void TileGradientOp<Device, Tmultiples>::HandleCase(
OpKernelContext* context, const std::vector<Tmultiples>& input_dims,
const gtl::ArraySlice<Tmultiples>& multiples_array, Tensor* result) {
LOG(FATAL) << "TileGradientOp: Invalid combination of Device, DT and NDIM: "
<< MakeTypeIndex<Device>().name() << ", " << DataTypeString(DT)
<< TypeIndex::Make<Device>().name() << ", " << DataTypeString(DT)
<< ", " << NDIM;
}

View File

@@ -23,14 +23,14 @@ namespace tensorflow {
struct MyRandomPODType {};
TEST(AbiTest, AbiDemangleTest) {
EXPECT_EQ(port::MaybeAbiDemangle(MakeTypeIndex<int>().name()), "int");
EXPECT_EQ(port::MaybeAbiDemangle(TypeIndex::Make<int>().name()), "int");
#ifdef PLATFORM_WINDOWS
const char pod_type_name[] = "struct tensorflow::MyRandomPODType";
#else
const char pod_type_name[] = "tensorflow::MyRandomPODType";
#endif
EXPECT_EQ(port::MaybeAbiDemangle(MakeTypeIndex<MyRandomPODType>().name()),
EXPECT_EQ(port::MaybeAbiDemangle(TypeIndex::Make<MyRandomPODType>().name()),
pod_type_name);
EXPECT_EQ(