Replace deprecated usage of np.asscalar with np.ndarray.item()
[`numpy.asscalar()` has been deprecated since version 1.16](https://github.com/numpy/numpy/blob/master/numpy/lib/type_check.py#L519-L548). This PR replaces its remaining uses with [`numpy.ndarray.item()`](https://www.numpy.org/devdocs/reference/generated/numpy.ndarray.item.html).
Commit 3623f9ae51 (parent 54dc5baa26)
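Not part of the diff, just a minimal sketch of the substitution being made (the array value below is assumed): `np.asscalar(a)` was implemented as `a.item()`, so for size-1 arrays the two calls return the same plain Python scalar.

```python
import numpy as np

a = np.array([3.5], dtype=np.float32)  # size-1 array, as in the tensor-proto helpers

# Deprecated since NumPy 1.16 (and removed in later releases):
# old = np.asscalar(a)

# Drop-in replacement used throughout this PR:
new = a.item()           # -> 3.5 as a plain Python float
print(type(new), new)    # <class 'float'> 3.5
```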
@@ -69,7 +69,7 @@ def _ConvertNumpyArrayToLiteral(ndarray):
 
   if ndarray.ndim == 0:
     getattr(literal, type_record.literal_field_name).append(
-        _np.asscalar(ndarray.astype(type_record.literal_field_type)))
+        ndarray.astype(type_record.literal_field_type).item())
   else:
     # Ndarrays with boolean dtypes need special type conversion with protobufs
     if ndarray.dtype in {_np.bool_, _np.dtype('bool')}:
@@ -131,4 +131,4 @@ def AppendBoolArrayToTensorProto(tensor_proto, nparray):
   cdef long i, n
   n = nparray.size
   for i in range(n):
-    tensor_proto.bool_val.append(np.asscalar(nparray[i]))
+    tensor_proto.bool_val.append(nparray.item(i))
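A small illustrative sketch of the flat-index form used in this hunk (the example values are assumed, not from the change): `ndarray.item(i)` takes a flat index and returns that element as a Python scalar, matching what `np.asscalar(nparray[i])` produced for a 1-D array.

```python
import numpy as np

nparray = np.array([True, False, False, True])

# Old: np.asscalar(nparray[i]) -- index first, then convert the NumPy scalar.
# New: nparray.item(i) -- item() accepts a flat index and returns a Python bool directly.
new_style = [nparray.item(i) for i in range(nparray.size)]

assert new_style == [True, False, False, True]
assert all(isinstance(v, bool) for v in new_style)
```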
@@ -42,7 +42,7 @@ from tensorflow.python.util.tf_export import tf_export
 
 
 def ExtractBitsFromFloat16(x):
-  return np.asscalar(np.asarray(x, dtype=np.float16).view(np.uint16))
+  return np.asarray(x, dtype=np.float16).view(np.uint16).item()
 
 
 def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
@@ -58,8 +58,8 @@ def _MediumAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
 
 
 def ExtractBitsFromBFloat16(x):
-  return np.asscalar(
-      np.asarray(x, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16))
+  return np.asarray(
+      x, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16).item()
 
 
 def SlowAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
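A hedged sketch of the bit-extraction pattern touched by the two hunks above (the standalone helper name and input value are assumed for the example; the bfloat16 variant is analogous but uses TensorFlow's `dtypes.bfloat16.as_numpy_dtype`): the scalar is reinterpreted as `uint16` via `view()`, and `.item()` then yields the raw bit pattern as a Python int, exactly what `np.asscalar` returned before.

```python
import numpy as np

def extract_bits_from_float16(x):
    # Reinterpret the 16 bits of a float16 value as an unsigned integer,
    # then unwrap the 0-d array into a plain Python int.
    return np.asarray(x, dtype=np.float16).view(np.uint16).item()

print(extract_bits_from_float16(1.0))  # 15360 == 0x3C00, the float16 encoding of 1.0
```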
@@ -122,39 +122,39 @@ if _FAST_TENSOR_UTIL_AVAILABLE:
 else:
 
   def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
-    tensor_proto.float_val.extend([np.asscalar(x) for x in proto_values])
+    tensor_proto.float_val.extend([x.item() for x in proto_values])
 
   def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
-    tensor_proto.double_val.extend([np.asscalar(x) for x in proto_values])
+    tensor_proto.double_val.extend([x.item() for x in proto_values])
 
   def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
-    tensor_proto.int_val.extend([np.asscalar(x) for x in proto_values])
+    tensor_proto.int_val.extend([x.item() for x in proto_values])
 
   def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
-    tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])
+    tensor_proto.int64_val.extend([x.item() for x in proto_values])
 
   def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
-    tensor_proto.int_val.extend([np.asscalar(x[0]) for x in proto_values])
+    tensor_proto.int_val.extend([x.item(0) for x in proto_values])
 
   def SlowAppendUInt32ArrayToTensorProto(tensor_proto, proto_values):
-    tensor_proto.uint32_val.extend([np.asscalar(x) for x in proto_values])
+    tensor_proto.uint32_val.extend([x.item() for x in proto_values])
 
   def SlowAppendUInt64ArrayToTensorProto(tensor_proto, proto_values):
-    tensor_proto.uint64_val.extend([np.asscalar(x) for x in proto_values])
+    tensor_proto.uint64_val.extend([x.item() for x in proto_values])
 
   def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.scomplex_val.extend(
-        [np.asscalar(v) for x in proto_values for v in [x.real, x.imag]])
+        [v.item() for x in proto_values for v in [x.real, x.imag]])
 
   def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.dcomplex_val.extend(
-        [np.asscalar(v) for x in proto_values for v in [x.real, x.imag]])
+        [v.item() for x in proto_values for v in [x.real, x.imag]])
 
   def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
 
   def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
-    tensor_proto.bool_val.extend([np.asscalar(x) for x in proto_values])
+    tensor_proto.bool_val.extend([x.item() for x in proto_values])
 
   _NP_TO_APPEND_FN = {
       dtypes.bfloat16.as_numpy_dtype: SlowAppendBFloat16ArrayToTensorProto,
@@ -595,7 +595,7 @@ class MinMaxOpTest(test.TestCase):
 
   def testScalar(self):
     x = np.random.rand(1, 3, 2) * 100.
-    y = np.asscalar(np.random.rand(1) * 100.)  # should broadcast
+    y = np.random.rand(1).item() * 100.  # should broadcast
     # dropped np.float64, int64 because TF automatically converts to 32 bit
     for t in [np.float32, np.int32]:
       self._compare(x.astype(t), t(y), use_gpu=False)
@@ -2334,7 +2334,7 @@ def leaky_relu(features, alpha=0.2, name=None):
       features = math_ops.to_float(features)
     if compat.forward_compatible(2018, 11, 1):
       if isinstance(alpha, np.ndarray):
-        alpha = np.asscalar(alpha)
+        alpha = alpha.item()
       return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
     alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
     return math_ops.maximum(alpha * features, features, name=name)
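For illustration only (the standalone helper below is hypothetical, not TensorFlow code): the `isinstance` guard in this hunk keeps the behaviour of the old `np.asscalar` call, unwrapping an ndarray `alpha` into a Python float before it is passed as a scalar attribute to `gen_nn_ops.leaky_relu`.

```python
import numpy as np

def normalize_alpha(alpha):
    # Same guard as in the hunk above: unwrap an ndarray slope into a Python float
    # so it can be used as a scalar op attribute.
    if isinstance(alpha, np.ndarray):
        alpha = alpha.item()  # works for 0-d and single-element arrays
    return alpha

print(normalize_alpha(0.2))            # 0.2  (already a Python float, passes through)
print(normalize_alpha(np.array(0.2)))  # 0.2  (0-d ndarray unwrapped)
```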