diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD
index 0b9aca16ee1..72a81f6742e 100644
--- a/tensorflow/python/BUILD
+++ b/tensorflow/python/BUILD
@@ -443,6 +443,7 @@ cc_library(
         ":numpy_lib",
         ":safe_ptr",
         "//tensorflow/c:c_api",
+        "//tensorflow/c:c_api_internal",
        "//tensorflow/c:tf_status_helper",
        "//tensorflow/core:framework",
        "//tensorflow/core:lib",
diff --git a/tensorflow/python/eager/pywrap_tensor.cc b/tensorflow/python/eager/pywrap_tensor.cc
index 775830b48ae..0f9da663859 100644
--- a/tensorflow/python/eager/pywrap_tensor.cc
+++ b/tensorflow/python/eager/pywrap_tensor.cc
@@ -627,7 +627,36 @@ static PyObject* EagerTensor_numpy(EagerTensor* self) {
     PyErr_SetString(PyExc_RuntimeError, TF_Message(status.get()));
     return nullptr;
   }
+
+  // HACK(slebedev): The following explains why TensorToNdarray never
+  // reuses the storage.
+  //
+  // TF_TensorToPyArray copies the storage unless its
+  // refcount is 1. For DT_STRING and DT_RESOURCE, TF_TensorFromTensor
+  // has to copy, so the refcount of the original storage is unchanged.
+  // However, if the storage can be reused by TF_TensorFromTensor, its
+  // refcount is +1'd, and hence TF_TensorToPyArray can no longer reuse it.
+  //
+  // Here we attempt a direct conversion without an intermediate TF_Tensor
+  // and fall back to the slow path on failure.
   PyObject* ret = nullptr;
+  if (t->dtype() != tensorflow::DT_STRING &&
+      t->dtype() != tensorflow::DT_RESOURCE) {
+    tensorflow::gtl::InlinedVector<npy_intp, 4> dims(t->dims());
+    for (int d = 0; d < t->dims(); ++d) {
+      dims[d] = t->dim_size(d);
+    }
+
+    auto* copy = new tensorflow::Tensor(*t);
+    char* data = const_cast<char*>(copy->tensor_data().data());
+    if (tensorflow::ArrayFromMemory(
+            dims.size(), dims.data(), data, t->dtype(), [copy] { delete copy; },
+            &ret)
+            .ok()) {
+      return ret;
+    }
+  }
+
   auto cppstatus = tensorflow::TensorToNdarray(*t, &ret);
   if (MaybeRaiseExceptionFromStatus(cppstatus, PyExc_RuntimeError)) {
     Py_XDECREF(ret);
diff --git a/tensorflow/python/eager/tensor_test.py b/tensorflow/python/eager/tensor_test.py
index 238f0f9eb1c..b5833718c79 100644
--- a/tensorflow/python/eager/tensor_test.py
+++ b/tensorflow/python/eager/tensor_test.py
@@ -487,6 +487,11 @@ class TFETensorUtilTest(test_util.TensorFlowTestCase):
         ValueError, "non-rectangular Python sequence"):
       constant_op.constant(l)
 
+  def test_numpyIsView(self):
+    t = constant_op.constant([0.0])
+    t._numpy()[0] = 42.0
+    self.assertAllClose(t, constant_op.constant([42.0]))
+
 
 if __name__ == "__main__":
   test.main()
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index f806a15a94a..8947e1f6597 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -758,7 +758,8 @@ class _EagerTensorBase(Tensor):
     """
     if self.dtype == dtypes.resource:
       raise ValueError("Resource handles are not convertible to numpy.")
-    return self._cpu_nograd()._numpy()  # pylint: disable=protected-access
+    maybe_arr = self._cpu_nograd()._numpy()  # pylint: disable=protected-access
+    return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
 
   # __int__, __float__ and __index__ may copy the tensor to CPU and
   # only work for scalars; values are cast as per numpy.
@@ -772,7 +773,7 @@
     return int(self.numpy())
 
   def __array__(self, dtype=None):
-    return np.array(self.numpy(), dtype=dtype)
+    return np.asarray(self.numpy(), dtype=dtype)
 
   def __format__(self, format_spec):
     return self.numpy().__format__(format_spec)
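
Taken together, the C++ and Python changes mean the private EagerTensor._numpy() now returns an ndarray view over the tensor's own buffer (kept alive by the [copy] { delete copy; } deleter passed to ArrayFromMemory), while the public numpy() defensively copies that view before handing it to callers. A minimal sketch of the resulting semantics, not part of the patch, assuming eager execution is enabled (the TF 2.x default, or via tf.compat.v1.enable_eager_execution() on 1.x):

import tensorflow as tf

t = tf.constant([0.0])

# _numpy() (private) shares storage with the tensor, so writes through the
# returned array are visible in the tensor itself -- this is exactly what
# test_numpyIsView asserts above.
view = t._numpy()
view[0] = 42.0
assert t.numpy()[0] == 42.0

# The public numpy() copies the view, so callers cannot mutate the tensor
# behind TensorFlow's back.
arr = t.numpy()
arr[0] = -1.0
assert t.numpy()[0] == 42.0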
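The __array__ tweak exists because numpy() already hands back a fresh copy after this patch: np.asarray returns a matching ndarray unchanged, whereas np.array copies unconditionally by default, which would have paid for the copy twice. A pure-NumPy illustration of that difference:

import numpy as np

arr = np.array([1.0, 2.0])
assert np.asarray(arr) is arr                        # same object, no second copy
assert np.array(arr) is not arr                      # copies by default
assert np.asarray(arr, dtype=np.float32) is not arr  # a dtype change still copies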