Removed unused CLDevice argument from the CL tensor creation and memory allocation helpers.
PiperOrigin-RevId: 327261070
Change-Id: Ie0ccf02d692b4af8616b92340dda8725fcdb0c43
parent 06fd3ef336
commit 77cf50d67b
tensorflow/lite/delegates/gpu/cl
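The diff below drops the now-unused const CLDevice& device parameter from the CreateTensor and AllocateTensorMemory overloads and updates every call site; argument order is otherwise unchanged. As a quick orientation, here is a minimal caller-side sketch assuming the post-change signatures shown in this diff. The helper name MakeScratchTensor, the shape, and the descriptor values are illustrative placeholders, not code from the commit; the header paths are the assumed locations of these declarations under tensorflow/lite/delegates/gpu/cl.

// Illustrative sketch only. MakeScratchTensor is a hypothetical helper and the
// shape/descriptor values are placeholders; only the call signature reflects
// this change.
#include "tensorflow/lite/delegates/gpu/cl/environment.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor.h"

namespace tflite {
namespace gpu {
namespace cl {

absl::Status MakeScratchTensor(Environment* env, Tensor* tensor) {
  const BHWC shape(1, 4, 4, 4);  // placeholder dimensions
  const TensorDescriptor descriptor{DataType::FLOAT32,
                                    TensorStorageType::TEXTURE_ARRAY,
                                    Layout::HWC};
  // Before this change: CreateTensor(env->context(), env->device(), shape,
  //                                  descriptor, tensor);
  // After this change the CLDevice argument is gone:
  return CreateTensor(env->context(), shape, descriptor, tensor);
}

}  // namespace cl
}  // namespace gpu
}  // namespace tflite

The same pattern applies to AllocateTensorMemory: only the CLDevice argument disappears from each overload.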
@@ -196,8 +196,8 @@ class DefaultTensorTie : public TensorTie {
                           ToTensorStorageType(d.object_def.object_type,
                                               d.object_def.data_layout),
                           Layout::BHWC};
-      RETURN_IF_ERROR(AllocateTensorMemory(env->context(), env->device(),
-                                           shape, desc, &cl_memory_));
+      RETURN_IF_ERROR(
+          AllocateTensorMemory(env->context(), shape, desc, &cl_memory_));
       if (d.object_def.object_type == ObjectType::OPENCL_TEXTURE) {
         external_obj_ = OpenClTexture{cl_memory_.memory()};
       } else {
@@ -59,7 +59,7 @@ absl::Status CheckKernelSupportOfOneLayerTextureArray(Environment* env,
   Tensor tensor;
   const BHWC shape(1, 4, 4, 4);
   RETURN_IF_ERROR(CreateTensor(
-      env->context(), env->device(), shape,
+      env->context(), shape,
       {DataType::FLOAT32, TensorStorageType::TEXTURE_ARRAY, Layout::HWC},
       &tensor));
   RETURN_IF_ERROR(kernel.SetMemory(0, tensor.GetMemoryPtr()));
@@ -501,7 +501,7 @@ absl::Status InferenceContext::AllocateMemoryForStrongShapes(
     graph_ids_to_strong_shape_tensors_[t.first] = id;
     const auto& it = strong_shape_tensors_.find(id);
     if (it == strong_shape_tensors_.end()) {
-      RETURN_IF_ERROR(CreateTensor(*context, device, shape, t.second,
+      RETURN_IF_ERROR(CreateTensor(*context, shape, t.second,
                                    &strong_shape_tensors_[id]));
     }
   }
@@ -34,8 +34,7 @@ absl::Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
       return absl::InvalidArgumentError(
           "Layout doesn't have Batch dimension, but shape.b != 1");
     }
-    RETURN_IF_ERROR(CreateTensor(*creation_context.context,
-                                 *creation_context.device, src_shape,
+    RETURN_IF_ERROR(CreateTensor(*creation_context.context, src_shape,
                                  op_def.src_tensors[0], &src[i]));
     RETURN_IF_ERROR(src[i].WriteData(creation_context.queue, src_cpu[i]));
     operation->SetSrc(&src[i], i);
@@ -48,8 +47,7 @@ absl::Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
       return absl::InvalidArgumentError(
           "Layout doesn't have Batch dimension, but shape.b != 1");
     }
-    RETURN_IF_ERROR(CreateTensor(*creation_context.context,
-                                 *creation_context.device, dst_shape,
+    RETURN_IF_ERROR(CreateTensor(*creation_context.context, dst_shape,
                                  op_def.dst_tensors[0], &dst[i]));

     operation->SetDst(&dst[i], i);
@@ -171,9 +171,8 @@ absl::Status CreateElementwiseTwoInput(
         definition.GetDataType(), Layout::HWC);
     TensorDescriptor desc{definition.GetDataType(), storage_type, Layout::HWC};
     Tensor gpu_tensor;
-    RETURN_IF_ERROR(CreateTensor(*creation_context.context,
-                                 *creation_context.device, shape, desc,
-                                 &gpu_tensor));
+    RETURN_IF_ERROR(
+        CreateTensor(*creation_context.context, shape, desc, &gpu_tensor));
     RETURN_IF_ERROR(
         gpu_tensor.WriteData(creation_context.queue, constant_tensor));

@@ -209,9 +208,8 @@ absl::Status CreateElementwiseTwoInput(
         definition.GetDataType(), Layout::HWC);
     TensorDescriptor desc{definition.GetDataType(), storage_type, Layout::HWC};
     Tensor gpu_tensor;
-    RETURN_IF_ERROR(CreateTensor(*creation_context.context,
-                                 *creation_context.device, shape, desc,
-                                 &gpu_tensor));
+    RETURN_IF_ERROR(
+        CreateTensor(*creation_context.context, shape, desc, &gpu_tensor));
     RETURN_IF_ERROR(
         gpu_tensor.WriteData(creation_context.queue, constant_tensor));

@@ -53,15 +53,13 @@ absl::Status CreateImageBufferFromBuffer(const CLContext& context,
   return absl::OkStatus();
 }

-absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
-                          const BHWDC& shape,
+absl::Status CreateTensor(const CLContext& context, const BHWDC& shape,
                           const TensorDescriptor& descriptor, cl_mem memory,
                           Tensor* result) {
   const bool memory_owner = memory == nullptr;
   if (memory_owner) {
     CLMemory mem;
-    RETURN_IF_ERROR(
-        AllocateTensorMemory(context, device, shape, descriptor, &mem));
+    RETURN_IF_ERROR(AllocateTensorMemory(context, shape, descriptor, &mem));
     memory = mem.Release();
   }
   if (descriptor.storage_type == TensorStorageType::IMAGE_BUFFER) {
@@ -434,17 +432,15 @@ absl::Status Tensor::ReadData(CLCommandQueue* queue,
   return ReadDataBHWDC(absl::MakeSpan(dst->data), queue);
 }

-absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
-                          const BHWC& shape, const TensorDescriptor& descriptor,
-                          Tensor* result) {
+absl::Status CreateTensor(const CLContext& context, const BHWC& shape,
+                          const TensorDescriptor& descriptor, Tensor* result) {
   const BHWDC shape5D(shape.b, shape.h, shape.w, 1, shape.c);
-  return CreateTensor(context, device, shape5D, descriptor, nullptr, result);
+  return CreateTensor(context, shape5D, descriptor, nullptr, result);
 }

-absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
-                          const BHWDC& shape,
+absl::Status CreateTensor(const CLContext& context, const BHWDC& shape,
                           const TensorDescriptor& descriptor, Tensor* result) {
-  return CreateTensor(context, device, shape, descriptor, nullptr, result);
+  return CreateTensor(context, shape, descriptor, nullptr, result);
 }

 absl::Status CreateSharedTensor(const CLContext& context, cl_mem memory,
@@ -462,16 +458,14 @@ absl::Status CreateSharedTensor(const CLContext& context, cl_mem memory,
   return CreateTensorShared(context, shape, descriptor, memory, result);
 }

-absl::Status AllocateTensorMemory(const CLContext& context,
-                                  const CLDevice& device, const BHWC& shape,
+absl::Status AllocateTensorMemory(const CLContext& context, const BHWC& shape,
                                   const TensorDescriptor& descriptor,
                                   CLMemory* result) {
   const BHWDC shape5D(shape.b, shape.h, shape.w, 1, shape.c);
-  return AllocateTensorMemory(context, device, shape5D, descriptor, result);
+  return AllocateTensorMemory(context, shape5D, descriptor, result);
 }

-absl::Status AllocateTensorMemory(const CLContext& context,
-                                  const CLDevice& device, const BHWDC& shape,
+absl::Status AllocateTensorMemory(const CLContext& context, const BHWDC& shape,
                                   const TensorDescriptor& descriptor,
                                   CLMemory* result) {
   const int slices = DivideRoundUp(shape.c, 4);
@@ -147,22 +147,18 @@ class Tensor : public GPUObject {

 using TensorPtr = std::shared_ptr<Tensor>;

-absl::Status AllocateTensorMemory(const CLContext& context,
-                                  const CLDevice& device, const BHWC& shape,
+absl::Status AllocateTensorMemory(const CLContext& context, const BHWC& shape,
                                   const TensorDescriptor& descriptor,
                                   CLMemory* result);

-absl::Status AllocateTensorMemory(const CLContext& context,
-                                  const CLDevice& device, const BHWDC& shape,
+absl::Status AllocateTensorMemory(const CLContext& context, const BHWDC& shape,
                                   const TensorDescriptor& descriptor,
                                   CLMemory* result);

-absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
-                          const BHWC& shape, const TensorDescriptor& descriptor,
-                          Tensor* result);
+absl::Status CreateTensor(const CLContext& context, const BHWC& shape,
+                          const TensorDescriptor& descriptor, Tensor* result);

-absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
-                          const BHWDC& shape,
+absl::Status CreateTensor(const CLContext& context, const BHWDC& shape,
                           const TensorDescriptor& descriptor, Tensor* result);

 absl::Status CreateSharedTensor(const CLContext& context, cl_mem memory,
@@ -47,8 +47,7 @@ absl::Status TensorGenericTest(const BHWC& shape,
   }

   Tensor tensor;
-  RETURN_IF_ERROR(
-      CreateTensor(env->context(), env->device(), shape, descriptor, &tensor));
+  RETURN_IF_ERROR(CreateTensor(env->context(), shape, descriptor, &tensor));
   RETURN_IF_ERROR(tensor.WriteData(env->queue(), tensor_cpu));
   RETURN_IF_ERROR(tensor.ReadData(env->queue(), &tensor_gpu));

@@ -77,8 +76,7 @@ absl::Status Tensor5DGenericTest(const BHWDC& shape,
   }

   Tensor tensor;
-  RETURN_IF_ERROR(
-      CreateTensor(env->context(), env->device(), shape, descriptor, &tensor));
+  RETURN_IF_ERROR(CreateTensor(env->context(), shape, descriptor, &tensor));
   RETURN_IF_ERROR(tensor.WriteData(env->queue(), tensor_cpu));
   RETURN_IF_ERROR(tensor.ReadData(env->queue(), &tensor_gpu));
