Add int32 type support for BiasAdd. CPU support already exists, so this change adds GPU support only.

PiperOrigin-RevId: 277818665
Change-Id: I174cdab534dc286e7ddd221e95bdef73e372097c
Sung Jin Hwang 2019-10-31 15:53:55 -07:00 committed by TensorFlower Gardener
parent ff133e632f
commit ef9eb1cf64
3 changed files with 5 additions and 1 deletion


@@ -379,6 +379,7 @@ class BiasOp<GPUDevice, T> : public BinaryOp<T> {
           BiasOp<GPUDevice, type>);
 TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_KERNEL);
+REGISTER_GPU_KERNEL(int32);
 #undef REGISTER_GPU_KERNEL
 struct BiasGradAutotuneGroup {
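
Here REGISTER_GPU_KERNEL is a local helper macro in bias_op.cc; as the trailing
"BiasOp<GPUDevice, type>);" context line suggests, it wraps the standard kernel
registration call. A rough sketch of what REGISTER_GPU_KERNEL(int32) expands to
(assumed from the usual TensorFlow registration idiom, not the verbatim macro body):

// Sketch only: approximate expansion of REGISTER_GPU_KERNEL(int32).
REGISTER_KERNEL_BUILDER(
    Name("BiasAdd").Device(DEVICE_GPU).TypeConstraint<int32>("T"),
    BiasOp<GPUDevice, int32>);

With this registration in place, BiasAdd ops with T=int32 can be placed on the GPU
rather than only on the existing CPU kernel.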


@@ -289,6 +289,9 @@ void BiasGradGPU<T>::DoColReduction(OpKernelContext* context, T* output,
 TF_CALL_GPU_NUMBER_TYPES(DEFINE_GPU_SPECS);
+// No BiasGrad kernel for int32.
+template struct BiasGPU<int32>;
 } // end namespace tensorflow
 #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
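
The "template struct BiasGPU<int32>;" line is an explicit template instantiation: the
functor's CUDA implementation is compiled in this .cu.cc translation unit, so the int32
specialization has to be emitted here for the host-side BiasOp kernel in bias_op.cc to
link against it (int32 is handled separately because it is not part of
TF_CALL_GPU_NUMBER_TYPES). Only the forward functor is instantiated; per the comment,
no BiasGrad GPU kernel is added for int32. A minimal, self-contained illustration of
the pattern, using simplified names rather than TensorFlow's actual declarations:

#include <cstdio>

// Simplified stand-in for the BiasGPU functor: the template definition lives in
// one translation unit, and an explicit instantiation forces the compiler to emit
// code for a concrete type so other object files can link against it.
template <typename T>
struct BiasSketch {
  static void compute(const T* input, const T* bias, T* output,
                      int n, int bias_size) {
    for (int i = 0; i < n; ++i) output[i] = input[i] + bias[i % bias_size];
  }
};

// Analogous in spirit to "template struct BiasGPU<int32>;" in the hunk above:
// force the int specialization to be compiled into this object file.
template struct BiasSketch<int>;

int main() {
  const int input[6] = {0, 1, 2, 3, 4, 5};
  const int bias[3] = {10, 20, 30};
  int output[6];
  BiasSketch<int>::compute(input, bias, output, 6, 3);
  for (int v : output) std::printf("%d ", v);  // prints: 10 21 32 13 24 35
  std::printf("\n");
  return 0;
}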


@@ -83,7 +83,7 @@ class BiasAddTestBase(test.TestCase):
   def _testAll(self, np_inputs, np_bias):
     self._testBias(np_inputs, np_bias, use_gpu=False)
     self._testBiasNCHW(np_inputs, np_bias, use_gpu=False)
-    if np_inputs.dtype in [np.float16, np.float32, np.float64]:
+    if np_inputs.dtype in [np.float16, np.float32, np.float64, np.int32]:
       self._testBias(np_inputs, np_bias, use_gpu=True)
       self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)