Remove integer dtypes from Soft{plus,sign} OpDefs.

These ops were never intended to support integer dtypes, and the
OpKernels have already been removed in a previous patch.

PiperOrigin-RevId: 214542750
Author: Todd Wang, 2018-09-25 19:13:30 -07:00 (committed by TensorFlower Gardener)
parent 8adf133448
commit 3f4b8c1381
5 changed files with 116 additions and 52 deletions
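In practice this moves the failure from kernel dispatch at session.run() time to op construction at graph-build time. A minimal sketch of the new behavior (assuming TensorFlow 1.x at this revision; the error strings are the ones asserted in the updated tests below):

import tensorflow as tf

# Before this change, building Softplus on an int32 tensor succeeded and
# execution later failed with InvalidArgumentError ("No OpKernel was
# registered to support Op 'Softplus'"). The OpDef now rejects it upfront:
try:
  tf.nn.softplus(tf.constant(7))  # int32 input
except TypeError as e:
  print(e)  # 'features' has DataType int32 not in list of allowed values

# Floating-point inputs are unaffected.
y = tf.nn.softplus(tf.constant(7.0))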

tensorflow/core/ops/compat/ops_history.v1.pbtxt

@@ -60084,6 +60084,29 @@ op {
     }
   }
 }
+op {
+  name: "Softplus"
+  input_arg {
+    name: "features"
+    type_attr: "T"
+  }
+  output_arg {
+    name: "activations"
+    type_attr: "T"
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_HALF
+        type: DT_BFLOAT16
+        type: DT_FLOAT
+        type: DT_DOUBLE
+      }
+    }
+  }
+}
 op {
   name: "SoftplusGrad"
   input_arg {
@@ -60220,6 +60243,33 @@ op {
     }
   }
 }
+op {
+  name: "SoftplusGrad"
+  input_arg {
+    name: "gradients"
+    type_attr: "T"
+  }
+  input_arg {
+    name: "features"
+    type_attr: "T"
+  }
+  output_arg {
+    name: "backprops"
+    type_attr: "T"
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_HALF
+        type: DT_BFLOAT16
+        type: DT_FLOAT
+        type: DT_DOUBLE
+      }
+    }
+  }
+}
 op {
   name: "Softsign"
   input_arg {
@@ -60340,6 +60390,29 @@ op {
     }
   }
 }
+op {
+  name: "Softsign"
+  input_arg {
+    name: "features"
+    type_attr: "T"
+  }
+  output_arg {
+    name: "activations"
+    type_attr: "T"
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_HALF
+        type: DT_BFLOAT16
+        type: DT_FLOAT
+        type: DT_DOUBLE
+      }
+    }
+  }
+}
 op {
   name: "SoftsignGrad"
   input_arg {
@@ -60476,6 +60549,33 @@ op {
     }
   }
 }
+op {
+  name: "SoftsignGrad"
+  input_arg {
+    name: "gradients"
+    type_attr: "T"
+  }
+  input_arg {
+    name: "features"
+    type_attr: "T"
+  }
+  output_arg {
+    name: "backprops"
+    type_attr: "T"
+  }
+  attr {
+    name: "T"
+    type: "type"
+    allowed_values {
+      list {
+        type: DT_HALF
+        type: DT_BFLOAT16
+        type: DT_FLOAT
+        type: DT_DOUBLE
+      }
+    }
+  }
+}
 op {
   name: "SpaceToBatch"
   input_arg {
tensorflow/core/ops/nn_ops.cc

@@ -1009,32 +1009,30 @@ REGISTER_OP("SeluGrad")
     .Attr("T: {half, bfloat16, float, double}")
     .SetShapeFn(shape_inference::MergeBothInputsShapeFn);
 
-// TODO(b/111515541): change T to {half, bfloat16, float, double}
 REGISTER_OP("Softplus")
     .Input("features: T")
     .Output("activations: T")
-    .Attr("T: realnumbertype")
+    .Attr("T: {half, bfloat16, float, double}")
     .SetShapeFn(shape_inference::UnchangedShape);
 
 REGISTER_OP("SoftplusGrad")
     .Input("gradients: T")
     .Input("features: T")
     .Output("backprops: T")
-    .Attr("T: realnumbertype")
+    .Attr("T: {half, bfloat16, float, double}")
     .SetShapeFn(shape_inference::MergeBothInputsShapeFn);
 
-// TODO(b/111515541): change T to {half, bfloat16, float, double}
 REGISTER_OP("Softsign")
     .Input("features: T")
     .Output("activations: T")
-    .Attr("T: realnumbertype")
+    .Attr("T: {half, bfloat16, float, double}")
     .SetShapeFn(shape_inference::UnchangedShape);
 
 REGISTER_OP("SoftsignGrad")
     .Input("gradients: T")
     .Input("features: T")
     .Output("backprops: T")
-    .Attr("T: realnumbertype")
+    .Attr("T: {half, bfloat16, float, double}")
     .SetShapeFn(shape_inference::MergeBothInputsShapeFn);
 
 // --------------------------------------------------------------------------
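For reference, the realnumbertype shorthand dropped above expands to exactly the twelve dtypes removed from ops.pbtxt below (half, bfloat16, float, double, plus the signed and unsigned integer types). A small sketch to confirm what the registered OpDef allows after this change; it assumes the TF 1.x-era internal op_def_registry module:

from tensorflow.python.framework import op_def_registry

# Look up the OpDef produced by the REGISTER_OP("Softplus") call above and
# print the allowed values of its "T" attr; after this change the list
# should contain only DT_HALF, DT_BFLOAT16, DT_FLOAT, DT_DOUBLE.
softplus = op_def_registry.get_registered_ops()["Softplus"]
t_attr = next(a for a in softplus.attr if a.name == "T")
print(t_attr.allowed_values.list.type)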

tensorflow/core/ops/ops.pbtxt

@@ -28714,18 +28714,10 @@ op {
     type: "type"
     allowed_values {
       list {
+        type: DT_HALF
+        type: DT_BFLOAT16
         type: DT_FLOAT
         type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_INT64
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
       }
     }
   }
@@ -28749,18 +28741,10 @@ op {
     type: "type"
     allowed_values {
       list {
+        type: DT_HALF
+        type: DT_BFLOAT16
         type: DT_FLOAT
         type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_INT64
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
       }
     }
   }
@@ -28780,18 +28764,10 @@ op {
     type: "type"
     allowed_values {
      list {
+        type: DT_HALF
+        type: DT_BFLOAT16
         type: DT_FLOAT
         type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_INT64
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
       }
     }
   }
@@ -28815,18 +28791,10 @@ op {
     type: "type"
     allowed_values {
       list {
+        type: DT_HALF
+        type: DT_BFLOAT16
         type: DT_FLOAT
         type: DT_DOUBLE
-        type: DT_INT32
-        type: DT_UINT8
-        type: DT_INT16
-        type: DT_INT8
-        type: DT_INT64
-        type: DT_BFLOAT16
-        type: DT_UINT16
-        type: DT_HALF
-        type: DT_UINT32
-        type: DT_UINT64
       }
     }
   }

tensorflow/python/kernel_tests/softplus_op_test.py

@@ -21,7 +21,6 @@ from __future__ import print_function
 import numpy as np
 
 from tensorflow.python.framework import constant_op
-from tensorflow.python.framework import errors
 from tensorflow.python.ops import gradient_checker
 from tensorflow.python.ops import gradients_impl
 from tensorflow.python.ops import nn_ops
@@ -125,8 +124,8 @@ class SoftplusTest(test.TestCase):
 
   def testNoInts(self):
     with self.cached_session():
       with self.assertRaisesRegexp(
-          errors.InvalidArgumentError,
-          "No OpKernel was registered to support Op 'Softplus'"):
+          TypeError,
+          "'features' has DataType int32 not in list of allowed values"):
         nn_ops.softplus(constant_op.constant(7)).eval()
 

tensorflow/python/kernel_tests/softsign_op_test.py

@@ -21,7 +21,6 @@ from __future__ import print_function
 import numpy as np
 
 from tensorflow.python.framework import constant_op
-from tensorflow.python.framework import errors
 from tensorflow.python.ops import gradient_checker
 from tensorflow.python.ops import nn_ops
 import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
@@ -69,8 +68,8 @@ class SoftsignTest(test.TestCase):
 
   def testNoInts(self):
     with self.cached_session():
       with self.assertRaisesRegexp(
-          errors.InvalidArgumentError,
-          "No OpKernel was registered to support Op 'Softsign'"):
+          TypeError,
+          "'features' has DataType int32 not in list of allowed values"):
         nn_ops.softsign(constant_op.constant(7)).eval()
 
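Callers that were passing integer tensors to either op now need an explicit cast; a hedged usage sketch:

import tensorflow as tf

ints = tf.constant([1, 2, 3])      # int32: now rejected by both OpDefs
feats = tf.cast(ints, tf.float32)  # explicit cast restores numeric intent
soft_plus = tf.nn.softplus(feats)  # log(exp(x) + 1), elementwise
soft_sign = tf.nn.softsign(feats)  # x / (|x| + 1), elementwise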