Merge pull request #3961 from gunan/r0.10
Cherrypicks to fix some issues in R0.10RC
commit 44595c44ee
@@ -226,4 +226,49 @@ TEST(CCOpTest, ColocateWith) {
   EXPECT_TRUE(attrs.find("_class") == attrs.end());
 }
 
+TEST(CCOpTest, TemplatedConst) {
+  Scope root = Scope::NewRootScope();
+  auto c1 = ops::Const<float>(root, {{3, 2}, {-1, 0}});
+  TF_EXPECT_OK(root.status());
+
+  Tensor out;
+  GetTensor(root, c1, &out);
+  test::ExpectTensorEqual<float>(
+      out, test::AsTensor<float>({3.f, 2.f, -1.f, 0.f}, {2, 2}));
+
+  auto c2 = ops::Const<string>(root, {{"this"}, {"is"}, {"a"}, {"constant"}});
+  GetTensor(root, c2, &out);
+  test::ExpectTensorEqual<string>(
+      out, test::AsTensor<string>({"this", "is", "a", "constant"}, {4, 1}));
+}
+
+TEST(CCOpTest, EmptyConst) {
+  Scope root = Scope::NewRootScope();
+
+  auto c1 = ops::Const(root, {});
+  TF_CHECK_OK(root.status());
+
+  Tensor out;
+  GetTensor(root, c1, &out);
+  test::ExpectTensorEqual<float>(out, Tensor(DT_FLOAT, {0}));
+
+  auto c2 = ops::Const(root, {{}});
+  TF_CHECK_OK(root.status());
+  GetTensor(root, c2, &out);
+  test::ExpectTensorEqual<float>(out, Tensor(DT_FLOAT, {1, 0}));
+
+  auto c3 = ops::Const(root, {{{}, {}}});
+  TF_CHECK_OK(root.status());
+  GetTensor(root, c3, &out);
+  test::ExpectTensorEqual<float>(out, Tensor(DT_FLOAT, {1, 2, 0}));
+
+  auto c4 = ops::Const<int>(root, {{{}}});
+  TF_CHECK_OK(root.status());
+  GetTensor(root, c4, &out);
+  test::ExpectTensorEqual<int>(out, Tensor(DT_INT32, {1, 1, 0}));
+
+  ops::Const(root, {{}, {{}}});
+  EXPECT_FALSE(root.status().ok());
+}
+
 } // namespace tensorflow
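[Editorial note, not part of the diff: in the EmptyConst cases above, the brace nesting of an empty literal encodes its rank, with the innermost empty braces contributing the trailing 0 dimension: {} yields shape {0}, {{}} yields {1, 0}, {{{}}} yields {1, 1, 0}. A minimal usage sketch outside the test harness follows; it assumes a TensorFlow build that ships the C++ ClientSession API, whereas the tests above use the file's own GetTensor helper.]

// Editorial sketch: evaluating an empty typed constant built with ops::Const.
#include "tensorflow/cc/client/client_session.h"  // assumed available
#include "tensorflow/cc/ops/const_op.h"

int main() {
  tensorflow::Scope root = tensorflow::Scope::NewRootScope();
  // Three levels of braces produce a rank-3 empty tensor of shape {1, 1, 0}.
  auto c = tensorflow::ops::Const<int>(root, {{{}}});
  TF_CHECK_OK(root.status());

  tensorflow::ClientSession session(root);
  std::vector<tensorflow::Tensor> outputs;
  TF_CHECK_OK(session.Run({c}, &outputs));
  // outputs[0] now has dtype DT_INT32 and shape {1, 1, 0}.
  return 0;
}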
@@ -25,22 +25,35 @@ namespace ops {
 
 Output Const(const Scope& scope, const Input::Initializer& val);
 
+NodeBuilder::NodeOut AsNodeOut(const Scope& scope, const Input& inp);
+
 template <typename T>
 Output Const(const Scope& scope, const Input::Initializer& val) {
+  auto orig_const_output = Const(scope, val);
+  if (!scope.ok()) return Output();
   if (!val.status.ok()) {
     scope.UpdateStatus(val.status);
     return Output();
   }
 
   typedef typename Input::Initializer::RealType<T>::type DstT;
-  if (val.tensor.NumElements() > 0) {
-    // TODO(keveman): Implement the in-situ cast.
-    scope.UpdateStatus(errors::Unimplemented(
-        "Explict cast of a non-empty tensor not implemented yet"));
-    return Output();
+  if (val.tensor.dtype() == DataTypeToEnum<DstT>::v()) {
+    return orig_const_output;
   }
-  Tensor t(DataTypeToEnum<DstT>::v(), val.tensor.shape());
-  return Const(scope, Input::Initializer(t));
+  if (val.tensor.NumElements() == 0) {
+    Tensor t(DataTypeToEnum<DstT>::v(), val.tensor.shape());
+    return Const(scope, Input::Initializer(t));
+  }
+
+  // TODO(keveman): Refactor Cast op's kernel implementation such that the code
+  // can be directly called here instead of adding the Cast op to the graph.
+  auto orig_const = AsNodeOut(scope, orig_const_output);
+  const auto cast_op_name = scope.GetUniqueNameForOp("Cast");
+
+  auto cast_builder = NodeBuilder(cast_op_name, "Cast")
+                          .Input(orig_const)
+                          .Attr("DstT", DataTypeToEnum<DstT>::v());
+  scope.UpdateBuilder(&cast_builder);
+  Node* ret;
+  scope.UpdateStatus(cast_builder.Finalize(scope.graph(), &ret));
+  return Output(ret, 0);
 }
 
 template <typename T>
@@ -54,8 +67,6 @@ Output Const(const Scope& scope, const std::initializer_list<T>& v,
   return Const(scope, Input::Initializer(v, shape));
 }
 
-NodeBuilder::NodeOut AsNodeOut(const Scope& scope, const Input& inp);
-
 std::vector<NodeBuilder::NodeOut> AsNodeOutList(const Scope& scope,
                                                 const InputList& inp);
 
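[Editorial note, not part of the diff: the new templated Const resolves a requested dtype in one of three ways: it returns the original constant when the dtype already matches, rebuilds an empty tensor with the target dtype, or appends an explicit Cast node through NodeBuilder instead of failing with Unimplemented. A minimal sketch of the cast path, mirroring the TemplatedConst test above; illustrative only.]

// Editorial sketch: integer literals cast to float by the new code path.
#include "tensorflow/cc/ops/const_op.h"

int main() {
  tensorflow::Scope root = tensorflow::Scope::NewRootScope();
  // The braced literals deduce to an integer tensor; requesting Const<float>
  // now emits a Cast node with Attr("DstT", DT_FLOAT), so f refers to the
  // Cast node's output rather than the original constant.
  auto f = tensorflow::ops::Const<float>(root, {{3, 2}, {-1, 0}});
  TF_CHECK_OK(root.status());
  return 0;
}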
@@ -125,4 +125,13 @@ TEST(ConstOpTest, Names) {
   EXPECT_EQ(c_y_1.node()->name(), "c/y_1");
 }
 
+TEST(ConstOpTest, TemplatedConst) {
+  Scope root = Scope::NewRootScope();
+  auto c1 = ops::Const<int>(root, {1, 2});
+  ExpectTypeAndShape(c1.node(), DT_INT32, {2});
+
+  auto c2 = ops::Const<string>(root, {{"this"}, {"is"}, {"a"}, {"constant"}});
+  ExpectTypeAndShape(c2.node(), DT_STRING, {4, 1});
+}
+
 } // namespace tensorflow
@@ -386,61 +386,20 @@ struct logical_not : base<bool, Eigen::internal::scalar_boolean_not_op<bool> > {
-// NOTE: std::isinf, std::isnan, std::isfinite are plain function.
-// Therefore we need to wrap them in functors to be used with Eigen's
-// type system.
-
 template <typename T>
-struct isinf_func {
-  typedef bool result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(T x) const {
-    return Eigen::numext::isinf(x);
-  }
-};
-
-template <typename T>
-struct isinf : base<T, isinf_func<T>, bool> {};
+struct isinf : base<T, Eigen::internal::scalar_isinf_op<T>, bool> {};
 
 template <typename T>
-struct isnan_func {
-  typedef bool result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(T x) const {
-    return Eigen::numext::isnan(x);
-  }
-};
-
-template <typename T>
-struct isnan : base<T, isnan_func<T>, bool> {};
+struct isnan : base<T, Eigen::internal::scalar_isnan_op<T>, bool> {};
 
 template <typename T>
-struct isfinite_func {
-  typedef bool result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool operator()(T x) const {
-    return Eigen::numext::isfinite(x);
-  }
-};
-
-template <typename T>
-struct isfinite : base<T, isfinite_func<T>, bool> {};
+struct isfinite : base<T, Eigen::internal::scalar_isfinite_op<T>, bool> {};
 
 template <typename T>
-struct floor_func {
-  typedef T result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(T x) const {
-    return Eigen::numext::floor(x);
-  }
-};
-
-template <typename T>
-struct floor : base<T, floor_func<T> > {};
+struct floor : base<T, Eigen::internal::scalar_floor_op<T>> {};
 
 template <typename T>
-struct ceil_func {
-  typedef T result_type;
-  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator()(T x) const {
-    return Eigen::numext::ceil(x);
-  }
-};
-
-template <typename T>
-struct ceil : base<T, ceil_func<T> > {};
+struct ceil : base<T, Eigen::internal::scalar_ceil_op<T>> {};
 
 ////////////////////////////////////////////////////////////////////////////////
 // Binary functors
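[Editorial aside, not part of the diff: the deleted *_func wrappers existed because a plain function cannot be passed where Eigen expects a callable type; they become unnecessary here, presumably because the Eigen version bump at the bottom of this PR supplies scalar_isinf_op and friends. The wrapping pattern itself generalizes; a minimal self-contained sketch in standard C++, without Eigen, follows.]

// Editorial sketch: wrapping a plain function in a functor so it can be used
// where a callable *type* (not a function pointer) is required.
#include <algorithm>
#include <cmath>
#include <vector>

template <typename T>
struct isinf_functor {
  typedef bool result_type;
  bool operator()(T x) const { return std::isinf(x); }
};

int main() {
  std::vector<double> xs = {1.0, INFINITY, -INFINITY};
  std::vector<bool> mask(xs.size());
  // The functor type is default-constructed at the call site, or passed as a
  // template argument, exactly as the removed *_func structs were in base<>.
  std::transform(xs.begin(), xs.end(), mask.begin(), isinf_functor<double>());
  return (!mask[0] && mask[1] && mask[2]) ? 0 : 1;
}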
@@ -110,7 +110,7 @@ struct CopyThatWorksWithStringPointer<Eigen::half> {
       d(d_start[0] + i0, d_start[1] + i1, d_start[2] + i2,
         d_start[3] + i3, d_start[4] + i4, d_start[5] + i5,
         d_start[6] + i6, d_start[7] + i7) =
-          Eigen::internal::raw_uint16_to_half(
+          Eigen::half_impl::raw_uint16_to_half(
               s(s_start[0] + i0, s_start[1] + i1, s_start[2] + i2,
                 s_start[3] + i3, s_start[4] + i4, s_start[5] + i5,
                 s_start[6] + i6, s_start[7] + i7));
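[Editorial note, not part of the diff: this one-line change tracks the Eigen upgrade below, which relocated raw_uint16_to_half from Eigen::internal to Eigen::half_impl. A hedged round-trip sketch follows; it assumes an Eigen version whose half type exposes its raw bits as the member x, which varies across Eigen releases.]

// Editorial sketch: Eigen::half stores an IEEE-754 binary16 bit pattern in a
// uint16_t; raw_uint16_to_half reinterprets such bits as a half value.
#include <cstdint>
#include <Eigen/Core>  // assumed to provide Eigen::half and Eigen::half_impl

int main() {
  Eigen::half h(0.5f);
  std::uint16_t bits = h.x;  // raw storage; member name is version-dependent
  Eigen::half back(Eigen::half_impl::raw_uint16_to_half(bits));
  return static_cast<float>(back) == 0.5f ? 0 : 1;
}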
@@ -55,13 +55,13 @@ def SortEigenDecomposition(e, v):
 
 def _GetSelfAdjointEigTest(dtype_, shape_):
 
-  def CompareEigenVectors(self, x, y, atol):
+  def CompareEigenVectors(self, x, y, tol):
     # Eigenvectors are only unique up to sign so we normalize the signs first.
     signs = np.sign(np.sum(np.divide(x, y), -2, keepdims=True))
     x *= signs
-    self.assertAllClose(x, y, atol)
+    self.assertAllClose(x, y, atol=tol, rtol=tol)
 
-  def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, atol):
+  def CompareEigenDecompositions(self, x_e, x_v, y_e, y_v, tol):
     num_batches = int(np.prod(x_e.shape[:-1]))
     n = x_e.shape[-1]
     x_e = np.reshape(x_e, [num_batches] + [n])
@@ -71,8 +71,8 @@ def _GetSelfAdjointEigTest(dtype_, shape_):
     for i in range(num_batches):
       x_ei, x_vi = SortEigenDecomposition(x_e[i, :], x_v[i, :, :])
       y_ei, y_vi = SortEigenDecomposition(y_e[i, :], y_v[i, :, :])
-      self.assertAllClose(x_ei, y_ei, atol=atol)
-      CompareEigenVectors(self, x_vi, y_vi, atol)
+      self.assertAllClose(x_ei, y_ei, atol=tol, rtol=tol)
+      CompareEigenVectors(self, x_vi, y_vi, tol)
 
   def Test(self):
     np.random.seed(1)
@@ -85,7 +85,7 @@ def _GetSelfAdjointEigTest(dtype_, shape_):
     if dtype_ == np.float32:
       atol = 1e-4
     else:
-      atol = 1e-14
+      atol = 1e-12
     for compute_v in False, True:
       np_e, np_v = np.linalg.eig(a)
       with self.test_session():
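[Editorial note, not part of the diff: the test changes above loosen the comparison by passing the tolerance as both atol and rtol, so the allowed error scales with magnitude; numpy's assert_allclose, which assertAllClose wraps, accepts when |x - y| <= atol + rtol * |y|. A minimal C++ sketch of that combined-tolerance check, illustrative only, follows.]

// Editorial sketch: numpy-style allclose with absolute + relative tolerance.
#include <cmath>
#include <cstdio>

bool AllClose(double x, double y, double atol, double rtol) {
  // Mirrors np.testing.assert_allclose: |x - y| <= atol + rtol * |y|.
  return std::fabs(x - y) <= atol + rtol * std::fabs(y);
}

int main() {
  // With tol passed as both atol and rtol, larger eigenvalues get a
  // proportionally larger allowance than with atol alone.
  std::printf("%d\n", AllClose(1000.5, 1000.0, 1e-4, 1e-3));  // 1: rtol dominates
  std::printf("%d\n", AllClose(1000.5, 1000.0, 1e-4, 0.0));   // 0: atol alone fails
  return 0;
}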
@@ -7,8 +7,8 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
 
   # These lines need to be changed when updating Eigen. They are parsed from
   # this file by the cmake and make builds to determine the eigen version and hash.
-  eigen_version = "b4fa9622b809"
-  eigen_sha256 = "2862840c2de9c0473a4ef20f8678949ae89ab25965352ee53329e63ba46cec62"
+  eigen_version = "6f952374ef2b"
+  eigen_sha256 = "56d658324b09de3f418ae42ca0646dd1e6e0b897dd58b164ec0d21315764afd9"
 
   native.new_http_archive(
       name = "eigen_archive",