Improves tf-numpy's clip to pass testClipStaticBounds, and adds uint8 kernels to tf.minimum and tf.maximum.

PiperOrigin-RevId: 307882806
Change-Id: If5d7cf3c8943dd22b5bbf30c3a5582091e626fcb
This commit is contained in:
Peng Wang 2020-04-22 13:16:23 -07:00 committed by TensorFlower Gardener
parent af7c496354
commit 3e3de0fdf5
6 changed files with 14 additions and 13 deletions

View File

@@ -19,7 +19,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
DEFINE_BINARY5(maximum, Eigen::half, float, double, int16, int64);
DEFINE_BINARY6(maximum, Eigen::half, float, double, uint8, int16, int64);
} // namespace functor
} // namespace tensorflow

View File

@@ -19,7 +19,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
DEFINE_BINARY5(minimum, Eigen::half, float, double, int16, int64);
DEFINE_BINARY6(minimum, Eigen::half, float, double, uint8, int16, int64);
} // namespace functor
} // namespace tensorflow

View File

@@ -16,11 +16,11 @@ limitations under the License.
#include "tensorflow/core/kernels/cwise_ops_common.h"
namespace tensorflow {
REGISTER7(BinaryOp, CPU, "Maximum", functor::maximum, float, Eigen::half,
bfloat16, double, int16, int32, int64);
REGISTER8(BinaryOp, CPU, "Maximum", functor::maximum, float, Eigen::half,
bfloat16, double, uint8, int16, int32, int64);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
REGISTER5(BinaryOp, GPU, "Maximum", functor::maximum, float, Eigen::half,
double, int16, int64);
REGISTER6(BinaryOp, GPU, "Maximum", functor::maximum, float, Eigen::half,
double, uint8, int16, int64);
// A special GPU kernel for int32.
// TODO(b/25387198): Also enable int32 in device memory. This kernel

View File

@@ -16,11 +16,11 @@ limitations under the License.
#include "tensorflow/core/kernels/cwise_ops_common.h"
namespace tensorflow {
REGISTER7(BinaryOp, CPU, "Minimum", functor::minimum, float, Eigen::half,
bfloat16, double, int16, int32, int64);
REGISTER8(BinaryOp, CPU, "Minimum", functor::minimum, float, Eigen::half,
bfloat16, double, uint8, int16, int32, int64);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
REGISTER5(BinaryOp, GPU, "Minimum", functor::minimum, float, Eigen::half,
double, int16, int64);
REGISTER6(BinaryOp, GPU, "Minimum", functor::minimum, float, Eigen::half,
double, uint8, int16, int64);
// A special GPU kernel for int32.
// TODO(b/25387198): Also enable int32 in device memory. This kernel

View File

@@ -549,7 +549,7 @@ REGISTER_OP("Maximum")
.Input("x: T")
.Input("y: T")
.Output("z: T")
.Attr("T: {bfloat16, half, float, double, int16, int32, int64}")
.Attr("T: {bfloat16, half, float, double, uint8, int16, int32, int64}")
.SetShapeFn(shape_inference::BroadcastBinaryOpShapeFn);
// Note: This op is not commutative w.r.t. to all its inputs.
@@ -573,7 +573,7 @@ REGISTER_OP("Minimum")
.Input("x: T")
.Input("y: T")
.Output("z: T")
.Attr("T: {bfloat16, half, float, double, int16, int32, int64}")
.Attr("T: {bfloat16, half, float, double, uint8, int16, int32, int64}")
.SetShapeFn(shape_inference::BroadcastBinaryOpShapeFn);
REGISTER_OP("Mod")

View File

@@ -733,7 +733,8 @@ class MinMaxOpTest(test.TestCase):
def testBasic(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float16, np.float32, np.float64, np.int16, np.int32, np.int64]:
for t in [np.float16, np.float32, np.float64, np.uint8, np.int16, np.int32,
np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)