diff --git a/configure b/configure index 2d7ec77aec2..0faf61c67b1 100755 --- a/configure +++ b/configure @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash ## Set up python-related environment settings while true; do diff --git a/tensorflow/cc/ops/const_op.cc b/tensorflow/cc/ops/const_op.cc index cdf653a880e..ddfa2a5b0e0 100644 --- a/tensorflow/cc/ops/const_op.cc +++ b/tensorflow/cc/ops/const_op.cc @@ -87,6 +87,9 @@ DEFINE_CONST(bool, bool_val); DEFINE_CONST_IMPL(complex64, proto.add_scomplex_val(t.begin()->real()); proto.add_scomplex_val(t.begin()->imag());); +DEFINE_CONST_IMPL(complex128, proto.add_dcomplex_val(t.begin()->real()); + proto.add_dcomplex_val(t.begin()->imag());); + Node* Const(StringPiece s, const GraphDefBuilder::Options& options) { if (options.HaveError()) return nullptr; NodeBuilder node_builder(options.GetNameForOp(OpName()), OpName(), diff --git a/tensorflow/cc/ops/const_op.h b/tensorflow/cc/ops/const_op.h index 36a97f8e4ca..0a1ee3f1e05 100644 --- a/tensorflow/cc/ops/const_op.h +++ b/tensorflow/cc/ops/const_op.h @@ -49,6 +49,7 @@ DECLARE_CONST(uint8); DECLARE_CONST(int16); DECLARE_CONST(int8); DECLARE_CONST(complex64); +DECLARE_CONST(complex128); DECLARE_CONST(int64); DECLARE_CONST(bool); diff --git a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py index 525a984ee98..8116ad00b0f 100644 --- a/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py +++ b/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py @@ -21,6 +21,8 @@ import os.path import threading import uuid +from six.moves import range # pylint: disable=redefined-builtin + from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework.load_library import load_op_library @@ -223,7 +225,7 @@ class SdcaModel(object): dense_features = self._convert_n_to_tensor(examples['dense_features']) dense_variables = self._convert_n_to_tensor(self._variables[ 'dense_features_weights']) - for i in xrange(len(dense_variables)): + for i in range(len(dense_variables)): predictions += dense_features[i] * dense_variables[i] return predictions diff --git a/tensorflow/core/framework/allocator.h b/tensorflow/core/framework/allocator.h index 97a3f616930..30c7c191023 100644 --- a/tensorflow/core/framework/allocator.h +++ b/tensorflow/core/framework/allocator.h @@ -187,13 +187,14 @@ class Allocator { // is_simple<T>::value if T[] can be safely constructed and destructed // without running T() and ~T(). We do not use std::is_trivial<T> - // directly because std::complex<float> is not trivial but its array - // can be constructed and destructed without running its default ctor - // and dtor. + // directly because std::complex<float> and std::complex<double> are + // not trivial, but their arrays can be constructed and destructed + // without running their default ctors and dtors.
template <typename T> struct is_simple { static const bool value = std::is_trivial<T>::value || std::is_same<T, complex64>::value || + std::is_same<T, complex128>::value || is_quantized<T>::value; }; diff --git a/tensorflow/core/framework/node_def_util_test.cc b/tensorflow/core/framework/node_def_util_test.cc index 07bd60f3b7b..e7dd1e58271 100644 --- a/tensorflow/core/framework/node_def_util_test.cc +++ b/tensorflow/core/framework/node_def_util_test.cc @@ -151,7 +151,7 @@ TEST(NodeDefUtilTest, Out) { ExpectFailure(bad, op, "Value for attr 'T' of string is not in the list of allowed " "values: float, double, int64, int32, uint8, uint16, int16, " - "int8, complex64, qint8, quint8, qint32"); + "int8, complex64, complex128, qint8, quint8, qint32"); } TEST(NodeDefUtilTest, Enum) { diff --git a/tensorflow/core/framework/numeric_types.h b/tensorflow/core/framework/numeric_types.h index c6230dab24e..9523e35b4ea 100644 --- a/tensorflow/core/framework/numeric_types.h +++ b/tensorflow/core/framework/numeric_types.h @@ -24,6 +24,8 @@ namespace tensorflow { // Single precision complex. typedef std::complex<float> complex64; +// Double precision complex. +typedef std::complex<double> complex128; } // end namespace tensorflow diff --git a/tensorflow/core/framework/op_def_builder_test.cc b/tensorflow/core/framework/op_def_builder_test.cc index 2d6a7f01aea..fbef9ebf624 100644 --- a/tensorflow/core/framework/op_def_builder_test.cc +++ b/tensorflow/core/framework/op_def_builder_test.cc @@ -113,7 +113,7 @@ TEST_F(OpDefBuilderTest, AttrWithRestrictions) { ExpectSuccess(b().Attr("a:numbertype"), "attr: { name: 'a' type: 'type' allowed_values { list { type: " "[DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_INT16, " - "DT_UINT16, DT_INT8, DT_COMPLEX64, DT_QINT8, DT_QUINT8, " + "DT_UINT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, " "DT_QINT32] } } }"); ExpectSuccess(b().Attr("a:realnumbertype"), "attr: { name: 'a' type: 'type' allowed_values { list { type: " diff --git a/tensorflow/core/framework/op_def_util_test.cc b/tensorflow/core/framework/op_def_util_test.cc index 854016f3569..813576c2e1b 100644 --- a/tensorflow/core/framework/op_def_util_test.cc +++ b/tensorflow/core/framework/op_def_util_test.cc @@ -246,6 +246,10 @@ TEST_F(ValidateOpDefTest, BadAttrAllowed) { TestBuilder(OpDefBuilder("BadAttrtude") .Attr("x: list(realnumbertype) = [DT_COMPLEX64]")), "attr 'x' of complex64 is not in the list of allowed values"); + ExpectFailure( + TestBuilder(OpDefBuilder("BadAttrtude") + .Attr("x: list(realnumbertype) = [DT_COMPLEX128]")), + "attr 'x' of complex128 is not in the list of allowed values"); // Is in list of allowed strings. TF_EXPECT_OK(TestBuilder( OpDefBuilder("GoodAttrtude").Attr("x: {'foo', 'bar'} = 'bar'"))); diff --git a/tensorflow/core/framework/register_types.h b/tensorflow/core/framework/register_types.h index d08388a83bb..1474dc62431 100644 --- a/tensorflow/core/framework/register_types.h +++ b/tensorflow/core/framework/register_types.h @@ -63,14 +63,16 @@ limitations under the License. m(int16); \ m(int8) -// Call "m" for all number types, including complex64. +// Call "m" for all number types, including complex64 and complex128.
#define TF_CALL_NUMBER_TYPES(m) \ TF_CALL_REAL_NUMBER_TYPES(m); \ - m(complex64) + m(complex64); \ + m(complex128) #define TF_CALL_NUMBER_TYPES_NO_INT32(m) \ TF_CALL_REAL_NUMBER_TYPES_NO_INT32(m); \ - m(complex64) + m(complex64); \ + m(complex128) #define TF_CALL_POD_TYPES(m) \ TF_CALL_NUMBER_TYPES(m); \ diff --git a/tensorflow/core/framework/tensor.cc b/tensorflow/core/framework/tensor.cc index e701b663194..e56db2af8c2 100644 --- a/tensorflow/core/framework/tensor.cc +++ b/tensorflow/core/framework/tensor.cc @@ -215,6 +215,22 @@ struct ProtoHelper<complex64> { } }; +template <> +struct ProtoHelper<complex128> { + typedef Helper<double>::RepeatedFieldType FieldType; + static const complex128* Begin(const TensorProto& proto) { + return reinterpret_cast<const complex128*>(proto.dcomplex_val().data()); + } + static size_t NumElements(const TensorProto& proto) { + return proto.dcomplex_val().size() / 2; + } + static void Fill(const complex128* data, size_t n, TensorProto* proto) { + const double* p = reinterpret_cast<const double*>(data); + FieldType copy(p, p + n * 2); + proto->mutable_dcomplex_val()->Swap(&copy); + } +}; + template <> struct ProtoHelper<qint32> { typedef Helper<int32>::RepeatedFieldType FieldType; @@ -385,6 +401,7 @@ void Tensor::UnsafeCopyFromInternal(const Tensor& other, CASE(int8, SINGLE_ARG(STMTS)) \ CASE(string, SINGLE_ARG(STMTS)) \ CASE(complex64, SINGLE_ARG(STMTS)) \ + CASE(complex128, SINGLE_ARG(STMTS)) \ CASE(int64, SINGLE_ARG(STMTS)) \ CASE(bool, SINGLE_ARG(STMTS)) \ CASE(qint32, SINGLE_ARG(STMTS)) \ diff --git a/tensorflow/core/framework/tensor.proto b/tensorflow/core/framework/tensor.proto index 013a2d0607a..59fc96420bb 100644 --- a/tensorflow/core/framework/tensor.proto +++ b/tensorflow/core/framework/tensor.proto @@ -57,4 +57,8 @@ message TensorProto { // DT_BOOL repeated bool bool_val = 11 [packed = true]; + + // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real + // and imaginary parts of i-th double precision complex. + repeated double dcomplex_val = 12 [packed = true]; }; diff --git a/tensorflow/core/framework/tensor_test.cc b/tensorflow/core/framework/tensor_test.cc index ec0fb57aad7..13896f9177d 100644 --- a/tensorflow/core/framework/tensor_test.cc +++ b/tensorflow/core/framework/tensor_test.cc @@ -47,12 +47,17 @@ TEST(TensorTest, DataType_Traits) { // Unfortunately. std::complex<float>::complex() initializes (0, 0). EXPECT_FALSE(std::is_trivial<complex64>::value); - EXPECT_FALSE(std::is_trivial<std::complex<double>>::value); + EXPECT_FALSE(std::is_trivial<complex128>::value); EXPECT_TRUE(std::is_trivial<float[2]>::value); - struct MyComplex { + EXPECT_TRUE(std::is_trivial<double[2]>::value); + struct MyComplex64 { float re, im; }; - EXPECT_TRUE(std::is_trivial<MyComplex>::value); + EXPECT_TRUE(std::is_trivial<MyComplex64>::value); + struct MyComplex128 { + double re, im; + }; + EXPECT_TRUE(std::is_trivial<MyComplex128>::value); } template <typename T> @@ -420,13 +425,19 @@ TEST(Tensor_Bool, SimpleWithHelper) { test::ExpectTensorEqual<bool>(t1, t2); } -TEST(Tensor_Complex, Simple) { +TEST(Tensor_Complex, Simple64) { Tensor t(DT_COMPLEX64, {4, 5, 3, 7}); t.flat<complex64>().setRandom(); TestCopies<complex64>(t); } -TEST(Tensor_Complex, SimpleWithHelper) { +TEST(Tensor_Complex, Simple128) { + Tensor t(DT_COMPLEX128, {4, 5, 3, 7}); + t.flat<complex128>().setRandom(); + TestCopies<complex128>(t); +} + +TEST(Tensor_Complex, SimpleWithHelper64) { { Tensor t1 = test::AsTensor<complex64>({0, {1, 1}, @@ -444,7 +455,7 @@ TEST(Tensor_Complex, SimpleWithHelper) { test::ExpectTensorEqual<complex64>(t2, t3); } - // Does some numeric operations for complex numbers. + // Does some numeric operations for complex64 numbers.
{ const float PI = std::acos(-1); const complex64 rotate_45 = std::polar(1.0f, PI / 4); @@ -475,6 +486,55 @@ TEST(Tensor_Complex, SimpleWithHelper) { } } +TEST(Tensor_Complex, SimpleWithHelper128) { + { + Tensor t1 = test::AsTensor<complex128>({0, + {1, 1}, + complex128(2), + complex128(3, 3), + complex128(0, 4), + complex128(2, 5)}, + {2, 3}); + Tensor t2(t1.dtype(), t1.shape()); + t2.flat<complex128>() = t1.flat<complex128>() * complex128(0, 2); + Tensor t3 = test::AsTensor<complex128>( + {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}}, + // shape + {2, 3}); + test::ExpectTensorEqual<complex128>(t2, t3); + } + + // Does some numeric operations for complex128 numbers. + { + const double PI = std::acos(-1); + const complex128 rotate_45 = std::polar(1.0, PI / 4); + + // x contains all the 8-th roots of unity. + Tensor x(DT_COMPLEX128, TensorShape({8})); + for (int i = 0; i < 8; ++i) { + x.vec<complex128>()(i) = std::pow(rotate_45, i); + } + + // Shift the roots by 45 degrees. + Tensor y(DT_COMPLEX128, TensorShape({8})); + y.vec<complex128>() = x.vec<complex128>() * rotate_45; + Tensor y_expected(DT_COMPLEX128, TensorShape({8})); + for (int i = 0; i < 8; ++i) { + y_expected.vec<complex128>()(i) = std::pow(rotate_45, i + 1); + } + test::ExpectTensorNear<complex128>(y, y_expected, 1e-5); + + // Raise roots to the power of 8. + Tensor z(DT_COMPLEX128, TensorShape({8})); + z.vec<complex128>() = x.vec<complex128>().pow(8); + Tensor z_expected(DT_COMPLEX128, TensorShape({8})); + for (int i = 0; i < 8; ++i) { + z_expected.vec<complex128>()(i) = 1; + } + test::ExpectTensorNear<complex128>(z, z_expected, 1e-5); + } +} + // On the alignment. // // As of 2015/8, tensorflow::Tensor allocates its buffer with 32-byte diff --git a/tensorflow/core/framework/tensor_testutil.h b/tensorflow/core/framework/tensor_testutil.h index 71e1767924e..8d14c252619 100644 --- a/tensorflow/core/framework/tensor_testutil.h +++ b/tensorflow/core/framework/tensor_testutil.h @@ -127,6 +127,12 @@ inline void ExpectEqual<complex64>(const complex64& a, const complex64& b) { EXPECT_FLOAT_EQ(a.imag(), b.imag()) << a << " vs. " << b; } +template <> +inline void ExpectEqual<complex128>(const complex128& a, const complex128& b) { + EXPECT_DOUBLE_EQ(a.real(), b.real()) << a << " vs. " << b; + EXPECT_DOUBLE_EQ(a.imag(), b.imag()) << a << " vs. 
" << b; +} + inline void AssertSameTypeDims(const Tensor& x, const Tensor& y) { ASSERT_EQ(x.dtype(), y.dtype()); ASSERT_TRUE(x.IsSameSize(y)) diff --git a/tensorflow/core/framework/types.cc b/tensorflow/core/framework/types.cc index 54b55e49c01..c87a0445cdf 100644 --- a/tensorflow/core/framework/types.cc +++ b/tensorflow/core/framework/types.cc @@ -64,6 +64,8 @@ string DataTypeString(DataType dtype) { return "string"; case DT_COMPLEX64: return "complex64"; + case DT_COMPLEX128: + return "complex128"; case DT_INT64: return "int64"; case DT_BOOL: @@ -125,6 +127,9 @@ bool DataTypeFromString(StringPiece sp, DataType* dt) { } else if (sp == "complex64") { *dt = DT_COMPLEX64; return true; + } else if (sp == "complex128") { + *dt = DT_COMPLEX128; + return true; } else if (sp == "int64") { *dt = DT_INT64; return true; @@ -165,9 +170,10 @@ string DataTypeSliceString(const DataTypeSlice types) { } DataTypeVector AllTypes() { - return {DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16, DT_UINT16, - DT_INT8, DT_STRING, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8, - DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}; + return {DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16, + DT_UINT16, DT_INT8, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, + DT_INT64, DT_BOOL, DT_QINT8, DT_QUINT8, DT_QINT16, + DT_QUINT16, DT_QINT32}; } #if !defined(__ANDROID__) @@ -188,8 +194,9 @@ DataTypeVector RealAndQuantizedTypes() { } DataTypeVector NumberTypes() { - return {DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_UINT16, - DT_INT16, DT_INT8, DT_COMPLEX64, DT_QINT8, DT_QUINT8, DT_QINT32}; + return {DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, + DT_UINT16, DT_INT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128, + DT_QINT8, DT_QUINT8, DT_QINT32 }; } #else // defined(__ANDROID__) @@ -223,6 +230,7 @@ bool DataTypeCanUseMemcpy(DataType dt) { case DT_INT16: case DT_INT8: case DT_COMPLEX64: + case DT_COMPLEX128: case DT_INT64: case DT_BOOL: case DT_QINT8: diff --git a/tensorflow/core/framework/types.h b/tensorflow/core/framework/types.h index 9651d2b64cd..6de9917d710 100644 --- a/tensorflow/core/framework/types.h +++ b/tensorflow/core/framework/types.h @@ -174,6 +174,7 @@ MATCH_TYPE_AND_ENUM(int16, DT_INT16); MATCH_TYPE_AND_ENUM(int8, DT_INT8); MATCH_TYPE_AND_ENUM(string, DT_STRING); MATCH_TYPE_AND_ENUM(complex64, DT_COMPLEX64); +MATCH_TYPE_AND_ENUM(complex128, DT_COMPLEX128); MATCH_TYPE_AND_ENUM(int64, DT_INT64); MATCH_TYPE_AND_ENUM(bool, DT_BOOL); MATCH_TYPE_AND_ENUM(qint8, DT_QINT8); diff --git a/tensorflow/core/framework/types.proto b/tensorflow/core/framework/types.proto index e6f0b13d97e..27e0b7e9cf4 100644 --- a/tensorflow/core/framework/types.proto +++ b/tensorflow/core/framework/types.proto @@ -30,10 +30,10 @@ enum DataType { DT_QINT16 = 15; // Quantized int16 DT_QUINT16 = 16; // Quantized uint16 DT_UINT16 = 17; + DT_COMPLEX128 = 18; // Double-precision complex // TODO(josh11b): DT_GENERIC_PROTO = ??; // TODO(jeff,josh11b): DT_UINT64? DT_UINT32? - // TODO(zhifengc): DT_COMPLEX128 (double-precision complex)? // Do not use! These are only for parameters. Every enum above // should have a corresponding value below (verified by types_test). 
@@ -54,4 +54,5 @@ enum DataType { DT_QINT16_REF = 115; DT_QUINT16_REF = 116; DT_UINT16_REF = 117; + DT_COMPLEX128_REF = 118; } diff --git a/tensorflow/core/ops/compat/ops_history.v0.pbtxt b/tensorflow/core/ops/compat/ops_history.v0.pbtxt index 7d4b14e9658..0d9360a931c 100644 --- a/tensorflow/core/ops/compat/ops_history.v0.pbtxt +++ b/tensorflow/core/ops/compat/ops_history.v0.pbtxt @@ -205,6 +205,47 @@ op { is_aggregate: true is_commutative: true } +op { + name: "AddN" + input_arg { + name: "inputs" + type_attr: "T" + number_attr: "N" + } + output_arg { + name: "sum" + type_attr: "T" + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 1 + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + is_aggregate: true + is_commutative: true +} op { name: "AdjustContrast" input_arg { @@ -456,6 +497,60 @@ op { } } } +op { + name: "ApplyAdagrad" + input_arg { + name: "var" + type_attr: "T" + is_ref: true + } + input_arg { + name: "accum" + type_attr: "T" + is_ref: true + } + input_arg { + name: "lr" + type_attr: "T" + } + input_arg { + name: "grad" + type_attr: "T" + } + output_arg { + name: "out" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "ApplyAdam" input_arg { @@ -611,6 +706,155 @@ op { } } } +op { + name: "ApplyAdam" + input_arg { + name: "var" + type_attr: "T" + is_ref: true + } + input_arg { + name: "m" + type_attr: "T" + is_ref: true + } + input_arg { + name: "v" + type_attr: "T" + is_ref: true + } + input_arg { + name: "beta1_power" + type_attr: "T" + } + input_arg { + name: "beta2_power" + type_attr: "T" + } + input_arg { + name: "lr" + type_attr: "T" + } + input_arg { + name: "beta1" + type_attr: "T" + } + input_arg { + name: "beta2" + type_attr: "T" + } + input_arg { + name: "epsilon" + type_attr: "T" + } + input_arg { + name: "grad" + type_attr: "T" + } + output_arg { + name: "out" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} +op { + name: "ApplyFtrl" + input_arg { + name: "var" + type_attr: "T" + is_ref: true + } + input_arg { + name: "accum" + type_attr: "T" + is_ref: true + } + input_arg { + name: "linear" + type_attr: "T" + is_ref: true + } + input_arg { + name: "grad" + type_attr: "T" + } + input_arg { + name: "lr" + type_attr: "T" + } + input_arg { + name: "l1" + type_attr: "T" + } + input_arg { + name: "l2" + type_attr: "T" + } + input_arg { + name: "lr_power" + type_attr: "T" + } + output_arg { + name: "out" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE 
+ type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "ApplyFtrl" input_arg { @@ -667,6 +911,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -776,6 +1021,55 @@ op { } } } +op { + name: "ApplyGradientDescent" + input_arg { + name: "var" + type_attr: "T" + is_ref: true + } + input_arg { + name: "alpha" + type_attr: "T" + } + input_arg { + name: "delta" + type_attr: "T" + } + output_arg { + name: "out" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "ApplyMomentum" input_arg { @@ -889,6 +1183,64 @@ op { } } } +op { + name: "ApplyMomentum" + input_arg { + name: "var" + type_attr: "T" + is_ref: true + } + input_arg { + name: "accum" + type_attr: "T" + is_ref: true + } + input_arg { + name: "lr" + type_attr: "T" + } + input_arg { + name: "grad" + type_attr: "T" + } + input_arg { + name: "momentum" + type_attr: "T" + } + output_arg { + name: "out" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "ApplyRMSProp" input_arg { @@ -1028,6 +1380,77 @@ op { } } } +op { + name: "ApplyRMSProp" + input_arg { + name: "var" + type_attr: "T" + is_ref: true + } + input_arg { + name: "ms" + type_attr: "T" + is_ref: true + } + input_arg { + name: "mom" + type_attr: "T" + is_ref: true + } + input_arg { + name: "lr" + type_attr: "T" + } + input_arg { + name: "rho" + type_attr: "T" + } + input_arg { + name: "momentum" + type_attr: "T" + } + input_arg { + name: "epsilon" + type_attr: "T" + } + input_arg { + name: "grad" + type_attr: "T" + } + output_arg { + name: "out" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "ArgMax" input_arg { @@ -1097,6 +1520,42 @@ op { } } } +op { + name: "ArgMax" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "dimension" + type: DT_INT32 + } + output_arg { + name: "output" + type: DT_INT64 + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } 
+} op { name: "ArgMin" input_arg { @@ -1166,6 +1625,42 @@ op { } } } +op { + name: "ArgMin" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "dimension" + type: DT_INT32 + } + output_arg { + name: "output" + type: DT_INT64 + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } +} op { name: "Assert" input_arg { @@ -1313,6 +1808,51 @@ op { } } } +op { + name: "AssignAdd" + input_arg { + name: "ref" + type_attr: "T" + is_ref: true + } + input_arg { + name: "value" + type_attr: "T" + } + output_arg { + name: "output_ref" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "AssignSub" input_arg { @@ -1400,6 +1940,51 @@ op { } } } +op { + name: "AssignSub" + input_arg { + name: "ref" + type_attr: "T" + is_ref: true + } + input_arg { + name: "value" + type_attr: "T" + } + output_arg { + name: "output_ref" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "AvgPool" input_arg { @@ -1912,6 +2497,62 @@ op { type: "bool" } } +op { + name: "BatchNormWithGlobalNormalization" + input_arg { + name: "t" + type_attr: "T" + } + input_arg { + name: "m" + type_attr: "T" + } + input_arg { + name: "v" + type_attr: "T" + } + input_arg { + name: "beta" + type_attr: "T" + } + input_arg { + name: "gamma" + type_attr: "T" + } + output_arg { + name: "result" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "variance_epsilon" + type: "float" + } + attr { + name: "scale_after_normalization" + type: "bool" + } +} op { name: "BatchNormWithGlobalNormalizationGrad" input_arg { @@ -2053,6 +2694,78 @@ op { type: "bool" } } +op { + name: "BatchNormWithGlobalNormalizationGrad" + input_arg { + name: "t" + type_attr: "T" + } + input_arg { + name: "m" + type_attr: "T" + } + input_arg { + name: "v" + type_attr: "T" + } + input_arg { + name: "gamma" + type_attr: "T" + } + input_arg { + name: "backprop" + type_attr: "T" + } + output_arg { + name: "dx" + type_attr: "T" + } + output_arg { + name: "dm" + type_attr: "T" + } + output_arg { + name: "dv" + type_attr: "T" + } + output_arg { + name: "db" + type_attr: "T" + } + output_arg { + name: "dg" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + 
type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "variance_epsilon" + type: "float" + } + attr { + name: "scale_after_normalization" + type: "bool" + } +} op { name: "BatchSelfAdjointEig" input_arg { @@ -2191,6 +2904,55 @@ op { } } } +op { + name: "BiasAdd" + input_arg { + name: "value" + type_attr: "T" + } + input_arg { + name: "bias" + type_attr: "T" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "data_format" + type: "string" + default_value { + s: "NHWC" + } + allowed_values { + list { + s: "NHWC" + s: "NCHW" + } + } + } +} op { name: "BiasAddGrad" input_arg { @@ -2235,6 +2997,51 @@ op { } } } +op { + name: "BiasAddGrad" + input_arg { + name: "out_backprop" + type_attr: "T" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "data_format" + type: "string" + default_value { + s: "NHWC" + } + allowed_values { + list { + s: "NHWC" + s: "NCHW" + } + } + } +} op { name: "BiasAddV1" input_arg { @@ -2270,6 +3077,42 @@ op { } } } +op { + name: "BiasAddV1" + input_arg { + name: "value" + type_attr: "T" + } + input_arg { + name: "bias" + type_attr: "T" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } +} op { name: "Bitcast" input_arg { @@ -2321,6 +3164,59 @@ op { } } } +op { + name: "Bitcast" + input_arg { + name: "input" + type_attr: "T" + } + output_arg { + name: "output" + type_attr: "type" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "type" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } +} op { name: "BroadcastGradientArgs" input_arg { @@ -5231,6 +6127,38 @@ op { } } } +op { + name: "L2Loss" + input_arg { + name: "t" + type_attr: "T" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: 
DT_QINT32 + } + } + } +} op { name: "LRN" input_arg { @@ -6067,6 +6995,49 @@ op { } } } +op { + name: "Max" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "reduction_indices" + type: DT_INT32 + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "keep_dims" + type: "bool" + default_value { + b: false + } + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } +} op { name: "MaxPool" input_arg { @@ -6453,6 +7424,49 @@ op { } } } +op { + name: "Mean" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "reduction_indices" + type: DT_INT32 + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "keep_dims" + type: "bool" + default_value { + b: false + } + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } +} op { name: "Merge" input_arg { @@ -6580,6 +7594,49 @@ op { } } } +op { + name: "Min" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "reduction_indices" + type: DT_INT32 + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "keep_dims" + type: "bool" + default_value { + b: false + } + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } +} op { name: "Minimum" input_arg { @@ -7686,6 +8743,49 @@ op { } } } +op { + name: "Prod" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "reduction_indices" + type: DT_INT32 + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "keep_dims" + type: "bool" + default_value { + b: false + } + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } +} op { name: "PyFunc" input_arg { @@ -9855,6 +10955,65 @@ op { } } } +op { + name: "ScatterAdd" + input_arg { + name: "ref" + type_attr: "T" + is_ref: true + } + input_arg { + name: "indices" + type_attr: "Tindices" + } + input_arg { + name: "updates" + type_attr: "T" + } + output_arg { + name: "output_ref" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "Tindices" + type: "type" + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "ScatterSub" input_arg { @@ -9970,6 +11129,65 @@ op { } } } +op { + name: 
"ScatterSub" + input_arg { + name: "ref" + type_attr: "T" + is_ref: true + } + input_arg { + name: "indices" + type_attr: "Tindices" + } + input_arg { + name: "updates" + type_attr: "T" + } + output_arg { + name: "output_ref" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "Tindices" + type: "type" + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "ScatterUpdate" input_arg { @@ -11249,6 +12467,74 @@ op { } } } +op { + name: "SparseApplyAdagrad" + input_arg { + name: "var" + type_attr: "T" + is_ref: true + } + input_arg { + name: "accum" + type_attr: "T" + is_ref: true + } + input_arg { + name: "lr" + type_attr: "T" + } + input_arg { + name: "grad" + type_attr: "T" + } + input_arg { + name: "indices" + type_attr: "Tindices" + } + output_arg { + name: "out" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "Tindices" + type: "type" + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "SparseApplyFtrl" input_arg { @@ -11333,6 +12619,91 @@ op { } } } +op { + name: "SparseApplyFtrl" + input_arg { + name: "var" + type_attr: "T" + is_ref: true + } + input_arg { + name: "accum" + type_attr: "T" + is_ref: true + } + input_arg { + name: "linear" + type_attr: "T" + is_ref: true + } + input_arg { + name: "grad" + type_attr: "T" + } + input_arg { + name: "indices" + type_attr: "Tindices" + } + input_arg { + name: "lr" + type_attr: "T" + } + input_arg { + name: "l1" + type_attr: "T" + } + input_arg { + name: "l2" + type_attr: "T" + } + input_arg { + name: "lr_power" + type_attr: "T" + } + output_arg { + name: "out" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "Tindices" + type: "type" + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "SparseApplyMomentum" input_arg { @@ -11474,6 +12845,78 @@ op { } } } +op { + name: "SparseApplyMomentum" + input_arg { + name: "var" + type_attr: "T" + is_ref: true + } + input_arg { + name: "accum" + type_attr: "T" + is_ref: true + } + input_arg { + name: "lr" + type_attr: "T" + } + input_arg { + name: "grad" + type_attr: "T" + } + input_arg { + name: "indices" + type_attr: "Tindices" + } + input_arg { + name: "momentum" + type_attr: "T" + } + output_arg { + name: "out" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: 
DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } + attr { + name: "Tindices" + type: "type" + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: false + } + } +} op { name: "SparseConcat" input_arg { @@ -12404,6 +13847,49 @@ op { } } } +op { + name: "Sum" + input_arg { + name: "input" + type_attr: "T" + } + input_arg { + name: "reduction_indices" + type: DT_INT32 + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "keep_dims" + type: "bool" + default_value { + b: false + } + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT64 + type: DT_INT32 + type: DT_UINT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT8 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_QINT8 + type: DT_QUINT8 + type: DT_QINT32 + } + } + } +} op { name: "Switch" input_arg { diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt index 130c2e41567..703a92c0ce4 100644 --- a/tensorflow/core/ops/ops.pbtxt +++ b/tensorflow/core/ops/ops.pbtxt @@ -102,6 +102,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -331,6 +332,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -423,6 +425,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -505,6 +508,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -560,6 +564,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -625,6 +630,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -706,6 +712,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -752,6 +759,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -789,6 +797,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -902,6 +911,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -952,6 +962,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1378,6 +1389,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1463,6 +1475,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1538,6 +1551,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1587,6 +1601,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1641,6 +1656,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: 
DT_QUINT8 type: DT_QINT32 @@ -1674,6 +1690,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -1694,6 +1711,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -4320,6 +4338,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -5109,6 +5128,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -5419,6 +5439,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -5520,6 +5541,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -6420,6 +6442,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -8132,6 +8155,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -8197,6 +8221,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -9146,6 +9171,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -9243,6 +9269,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -9324,6 +9351,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 @@ -10234,6 +10262,7 @@ op { type: DT_INT16 type: DT_INT8 type: DT_COMPLEX64 + type: DT_COMPLEX128 type: DT_QINT8 type: DT_QUINT8 type: DT_QINT32 diff --git a/tensorflow/core/public/tensor_c_api.h b/tensorflow/core/public/tensor_c_api.h index 14f4dfa8125..b7ac96b6b9b 100644 --- a/tensorflow/core/public/tensor_c_api.h +++ b/tensorflow/core/public/tensor_c_api.h @@ -78,7 +78,8 @@ typedef enum { TF_INT16 = 5, TF_INT8 = 6, TF_STRING = 7, - TF_COMPLEX = 8, // Single-precision complex + TF_COMPLEX64 = 8, // Single-precision complex + TF_COMPLEX = 8, // Old identifier kept for API backwards compatibility TF_INT64 = 9, TF_BOOL = 10, TF_QINT8 = 11, // Quantized int8 @@ -88,6 +89,7 @@ typedef enum { TF_QINT16 = 15, // Quantized int16 TF_QUINT16 = 16, // Quantized uint16 TF_UINT16 = 17, + TF_COMPLEX128 = 18, // Double-precision complex } TF_DataType; // -------------------------------------------------------------------------- diff --git a/tensorflow/core/util/saved_tensor_slice_util.h b/tensorflow/core/util/saved_tensor_slice_util.h index 6c3759ffac8..ce2dc5552e2 100644 --- a/tensorflow/core/util/saved_tensor_slice_util.h +++ b/tensorflow/core/util/saved_tensor_slice_util.h @@ -108,6 +108,7 @@ TENSOR_PROTO_EXTRACT_TYPE(bool, bool, bool); TENSOR_PROTO_EXTRACT_TYPE(float, float, float); TENSOR_PROTO_EXTRACT_TYPE(double, double, double); TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex64, scomplex, float); +TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex128, dcomplex, double); TENSOR_PROTO_EXTRACT_TYPE(int32, int, int32); TENSOR_PROTO_EXTRACT_TYPE(int64, int64, int64); TENSOR_PROTO_EXTRACT_TYPE(uint8, int, int32); diff --git a/tensorflow/examples/android/README.md b/tensorflow/examples/android/README.md index 11c8252f85e..fb737f7004e 100644 --- a/tensorflow/examples/android/README.md +++ 
b/tensorflow/examples/android/README.md @@ -19,7 +19,7 @@ installed on your system. 3. The Android SDK and build tools may be obtained from: https://developer.android.com/tools/revisions/build-tools.html -The Android entries in [`/WORKSPACE`](../../WORKSPACE) must be +The Android entries in [`/WORKSPACE`](../../../WORKSPACE#L2-L13) must be uncommented with the paths filled in appropriately depending on where you installed the NDK and SDK. Otherwise an error such as: "The external label '//external:android/sdk' is not bound to anything" will @@ -45,10 +45,8 @@ your workspace root: $ bazel build //tensorflow/examples/android:tensorflow_demo ``` -If you get build errors about protocol buffers then you may have left out the -`--recurse-submodules` argument to `git clone`. Review the instructions -here and then build again: -https://www.tensorflow.org/versions/master/get_started/os_setup.html#clone-the-tensorflow-repository +If you get build errors about protocol buffers, run +`git submodule update --init` and build again. If adb debugging is enabled on your Android 5.0 or later device, you may then use the following command from your workspace root to install the APK once diff --git a/tensorflow/examples/label_image/README.md b/tensorflow/examples/label_image/README.md index c24ce19f7f3..1f40e8bef0d 100644 --- a/tensorflow/examples/label_image/README.md +++ b/tensorflow/examples/label_image/README.md @@ -43,15 +43,15 @@ This uses the default example image that ships with the framework, and should output something similar to this: ``` -I tensorflow/examples/label_image/main.cc:200] military uniform (866): 0.902268 -I tensorflow/examples/label_image/main.cc:200] bow tie (817): 0.05407 -I tensorflow/examples/label_image/main.cc:200] suit (794): 0.0113195 -I tensorflow/examples/label_image/main.cc:200] bulletproof vest (833): 0.0100269 -I tensorflow/examples/label_image/main.cc:200] bearskin (849): 0.00649746 +I tensorflow/examples/label_image/main.cc:207] military uniform (866): 0.647299 +I tensorflow/examples/label_image/main.cc:207] suit (794): 0.0477195 +I tensorflow/examples/label_image/main.cc:207] academic gown (896): 0.0232407 +I tensorflow/examples/label_image/main.cc:207] bow tie (817): 0.0157355 +I tensorflow/examples/label_image/main.cc:207] bolo tie (940): 0.0145023 ``` In this case, we're using the default image of Admiral Grace Hopper, and you can see the network correctly spots she's wearing a military uniform, with a high -score of 0.9. +score of 0.6. Next, try it out on your own images by supplying the --image= argument, e.g. diff --git a/tensorflow/examples/udacity/1_notmnist.ipynb b/tensorflow/examples/udacity/1_notmnist.ipynb index b4704a3985d..9d864ccd374 100644 --- a/tensorflow/examples/udacity/1_notmnist.ipynb +++ b/tensorflow/examples/udacity/1_notmnist.ipynb @@ -117,7 +117,7 @@ " print('Found and verified', filename)\n", " else:\n", " raise Exception(\n", - " 'Failed to verify' + filename + '. Can you get to it with a browser?')\n", + " 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n", " return filename\n", "\n", "train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n", diff --git a/tensorflow/models/image/cifar10/cifar10.py b/tensorflow/models/image/cifar10/cifar10.py index 8f5fd4f50d1..05c8e70f5fc 100644 --- a/tensorflow/models/image/cifar10/cifar10.py +++ b/tensorflow/models/image/cifar10/cifar10.py @@ -67,7 +67,7 @@ NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays. 
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor. INITIAL_LEARNING_RATE = 0.1 # Initial learning rate. -# If a model is trained with multiple GPU's prefix all Op names with tower_name +# If a model is trained with multiple GPUs, prefix all Op names with tower_name # to differentiate the operations. Note that this prefix is removed from the # names of the summaries when visualizing a model. TOWER_NAME = 'tower' @@ -255,7 +255,7 @@ def inference(images): def loss(logits, labels): """Add L2Loss to all the trainable variables. - Add summary for for "Loss" and "Loss/avg". + Add summary for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor diff --git a/tensorflow/models/image/cifar10/cifar10_input.py b/tensorflow/models/image/cifar10/cifar10_input.py index a9a086992dc..0d48a3549ce 100644 --- a/tensorflow/models/image/cifar10/cifar10_input.py +++ b/tensorflow/models/image/cifar10/cifar10_input.py @@ -172,7 +172,7 @@ def distorted_inputs(data_dir, batch_size): distorted_image = tf.image.random_flip_left_right(distorted_image) # Because these operations are not commutative, consider randomizing - # randomize the order their operation. + # the order of their operation. distorted_image = tf.image.random_brightness(distorted_image, max_delta=63) distorted_image = tf.image.random_contrast(distorted_image, diff --git a/tensorflow/python/__init__.py b/tensorflow/python/__init__.py index 11bee08246c..c36cdfe30fc 100644 --- a/tensorflow/python/__init__.py +++ b/tensorflow/python/__init__.py @@ -181,6 +181,7 @@ __all__.extend([ 'bfloat16', 'bfloat16_ref', 'bool', 'bool_ref', 'complex64', 'complex64_ref', + 'complex128', 'complex128_ref', 'double', 'double_ref', 'float32', 'float32_ref', 'float64', 'float64_ref', diff --git a/tensorflow/python/client/session_test.py b/tensorflow/python/client/session_test.py index 55868328ffa..491b293125d 100644 --- a/tensorflow/python/client/session_test.py +++ b/tensorflow/python/client/session_test.py @@ -687,7 +687,8 @@ class SessionTest(test_util.TensorFlowTestCase): dtypes.int8, dtypes.int64, dtypes.bool, - dtypes.complex64]: + dtypes.complex64, + dtypes.complex128]: for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: np_dtype = dtype.as_numpy_dtype @@ -700,6 +701,8 @@ class SessionTest(test_util.TensorFlowTestCase): np_array = np_array > 0 elif dtype == dtypes.complex64: np_array = np.sqrt(np_array.astype(np_dtype)) + elif dtype == dtypes.complex128: + np_array = np.sqrt(np_array.astype(np_dtype)) else: np_array = np_array.astype(np_dtype) diff --git a/tensorflow/python/client/tf_session_helper.cc b/tensorflow/python/client/tf_session_helper.cc index 02014cf3b36..e5cdcddd5d5 100644 --- a/tensorflow/python/client/tf_session_helper.cc +++ b/tensorflow/python/client/tf_session_helper.cc @@ -1,4 +1,4 @@ -/* Copyright 2015 Google Inc. All Rights Reserved. +/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
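The session_test.py hunk above, together with the tf_session_helper.cc mapping below, is what lets double-precision complex arrays survive a feed/fetch round trip. A hedged sketch of the end-to-end behavior (TF 0.x graph/session API; assumes this patch is built):

```python
import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    # Before this patch only complex64 survived the NumPy <-> TF_Tensor
    # conversion; complex128 inputs had no NPY_COMPLEX128 mapping.
    x = np.sqrt(np.arange(6, dtype=np.complex128)).reshape(2, 3)
    c = tf.constant(x)  # dtype inferred as tf.complex128 via _NP_TO_TF
    y = sess.run(c)
    assert y.dtype == np.complex128
    np.testing.assert_allclose(y, x)
```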
@@ -121,7 +121,10 @@ Status PyArray_TYPE_to_TF_DataType(PyArrayObject* array, *out_tf_datatype = TF_BOOL; break; case NPY_COMPLEX64: - *out_tf_datatype = TF_COMPLEX; + *out_tf_datatype = TF_COMPLEX64; + break; + case NPY_COMPLEX128: + *out_tf_datatype = TF_COMPLEX128; break; case NPY_OBJECT: *out_tf_datatype = TF_STRING; @@ -168,9 +171,12 @@ Status TF_DataType_to_PyArray_TYPE(TF_DataType tf_datatype, case TF_BOOL: *out_pyarray_type = NPY_BOOL; break; - case TF_COMPLEX: + case TF_COMPLEX64: *out_pyarray_type = NPY_COMPLEX64; break; + case TF_COMPLEX128: + *out_pyarray_type = NPY_COMPLEX128; + break; case TF_STRING: *out_pyarray_type = NPY_OBJECT; break; diff --git a/tensorflow/python/framework/dtypes.py b/tensorflow/python/framework/dtypes.py index 9c1e05f8bcd..d964a7f29b9 100644 --- a/tensorflow/python/framework/dtypes.py +++ b/tensorflow/python/framework/dtypes.py @@ -32,6 +32,7 @@ class DType(object): * `tf.float64`: 64-bit double-precision floating-point. * `tf.bfloat16`: 16-bit truncated floating-point. * `tf.complex64`: 64-bit single-precision complex. + * `tf.complex128`: 128-bit double-precision complex. * `tf.int8`: 8-bit signed integer. * `tf.uint8`: 8-bit unsigned integer. @@ -122,6 +123,8 @@ class DType(object): base = self.base_dtype if base == complex64: return float32 + elif base == complex128: + return float64 else: return self @@ -149,7 +152,7 @@ class DType(object): @property def is_complex(self): """Returns whether this is a complex floating point type.""" - return self.base_dtype == complex64 + return self.base_dtype in (complex64, complex128) @property def is_quantized(self): @@ -179,8 +182,8 @@ class DType(object): TypeError: if this is a non-numeric, unordered, or quantized type. """ - if (self.is_quantized or self.base_dtype == bool or - self.base_dtype == string or self.base_dtype == complex64): + if (self.is_quantized or self.base_dtype in + (bool, string, complex64, complex128)): raise TypeError("Cannot find minimum value of %s." % self) # there is no simple way to get the min value of a dtype, we have to check @@ -201,8 +204,8 @@ class DType(object): TypeError: if this is a non-numeric, unordered, or quantized type. """ - if (self.is_quantized or self.base_dtype == bool or - self.base_dtype == string or self.base_dtype == complex64): + if (self.is_quantized or self.base_dtype in + (bool, string, complex64, complex128)): raise TypeError("Cannot find maximum value of %s." 
% self) # there is no simple way to get the min value of a dtype, we have to check @@ -277,6 +280,7 @@ int16 = DType(types_pb2.DT_INT16) int8 = DType(types_pb2.DT_INT8) string = DType(types_pb2.DT_STRING) complex64 = DType(types_pb2.DT_COMPLEX64) +complex128 = DType(types_pb2.DT_COMPLEX128) int64 = DType(types_pb2.DT_INT64) bool = DType(types_pb2.DT_BOOL) qint8 = DType(types_pb2.DT_QINT8) @@ -295,6 +299,7 @@ int16_ref = DType(types_pb2.DT_INT16_REF) int8_ref = DType(types_pb2.DT_INT8_REF) string_ref = DType(types_pb2.DT_STRING_REF) complex64_ref = DType(types_pb2.DT_COMPLEX64_REF) +complex128_ref = DType(types_pb2.DT_COMPLEX128_REF) int64_ref = DType(types_pb2.DT_INT64_REF) bool_ref = DType(types_pb2.DT_BOOL_REF) qint8_ref = DType(types_pb2.DT_QINT8_REF) @@ -317,6 +322,7 @@ _INTERN_TABLE = { types_pb2.DT_INT8: int8, types_pb2.DT_STRING: string, types_pb2.DT_COMPLEX64: complex64, + types_pb2.DT_COMPLEX128: complex128, types_pb2.DT_INT64: int64, types_pb2.DT_BOOL: bool, types_pb2.DT_QINT8: qint8, @@ -334,6 +340,7 @@ _INTERN_TABLE = { types_pb2.DT_INT8_REF: int8_ref, types_pb2.DT_STRING_REF: string_ref, types_pb2.DT_COMPLEX64_REF: complex64_ref, + types_pb2.DT_COMPLEX128_REF: complex128_ref, types_pb2.DT_INT64_REF: int64_ref, types_pb2.DT_BOOL_REF: bool_ref, types_pb2.DT_QINT8_REF: qint8_ref, @@ -356,6 +363,7 @@ _TYPE_TO_STRING = { types_pb2.DT_INT8: "int8", types_pb2.DT_STRING: "string", types_pb2.DT_COMPLEX64: "complex64", + types_pb2.DT_COMPLEX128: "complex128", types_pb2.DT_INT64: "int64", types_pb2.DT_BOOL: "bool", types_pb2.DT_QINT8: "qint8", @@ -373,6 +381,7 @@ _TYPE_TO_STRING = { types_pb2.DT_INT8_REF: "int8_ref", types_pb2.DT_STRING_REF: "string_ref", types_pb2.DT_COMPLEX64_REF: "complex64_ref", + types_pb2.DT_COMPLEX128_REF: "complex128_ref", types_pb2.DT_INT64_REF: "int64_ref", types_pb2.DT_BOOL_REF: "bool_ref", types_pb2.DT_QINT8_REF: "qint8_ref", @@ -414,6 +423,7 @@ _NP_TO_TF = frozenset([ (np.int16, int16), (np.int8, int8), (np.complex64, complex64), + (np.complex128, complex128), (np.object, string), (np.bool, bool), (_np_qint8, qint8), @@ -435,6 +445,7 @@ _TF_TO_NP = { # strings. 
    types_pb2.DT_STRING: np.object,
    types_pb2.DT_COMPLEX64: np.complex64,
+    types_pb2.DT_COMPLEX128: np.complex128,
    types_pb2.DT_INT64: np.int64,
    types_pb2.DT_BOOL: np.bool,
    types_pb2.DT_QINT8: _np_qint8,
@@ -454,6 +465,7 @@ _TF_TO_NP = {
    types_pb2.DT_INT8_REF: np.int8,
    types_pb2.DT_STRING_REF: np.object,
    types_pb2.DT_COMPLEX64_REF: np.complex64,
+    types_pb2.DT_COMPLEX128_REF: np.complex128,
    types_pb2.DT_INT64_REF: np.int64,
    types_pb2.DT_BOOL_REF: np.bool,
    types_pb2.DT_QINT8_REF: _np_qint8,
diff --git a/tensorflow/python/framework/dtypes_test.py b/tensorflow/python/framework/dtypes_test.py
index 91fada9f0f2..d303918987c 100644
--- a/tensorflow/python/framework/dtypes_test.py
+++ b/tensorflow/python/framework/dtypes_test.py
@@ -71,6 +71,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertIs(tf.int16, tf.as_dtype(np.int16))
     self.assertIs(tf.int8, tf.as_dtype(np.int8))
     self.assertIs(tf.complex64, tf.as_dtype(np.complex64))
+    self.assertIs(tf.complex128, tf.as_dtype(np.complex128))
     self.assertIs(tf.string, tf.as_dtype(np.object))
     self.assertIs(tf.string, tf.as_dtype(np.array(["foo", "bar"]).dtype))
     self.assertIs(tf.bool, tf.as_dtype(np.bool))
@@ -82,6 +83,7 @@ class TypesTest(test_util.TensorFlowTestCase):
                   tf.int32, tf.int64]:
       self.assertIs(dtype.real_dtype, dtype)
     self.assertIs(tf.complex64.real_dtype, tf.float32)
+    self.assertIs(tf.complex128.real_dtype, tf.float64)

   def testStringConversion(self):
     self.assertIs(tf.float32, tf.as_dtype("float32"))
@@ -93,6 +95,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertIs(tf.int8, tf.as_dtype("int8"))
     self.assertIs(tf.string, tf.as_dtype("string"))
     self.assertIs(tf.complex64, tf.as_dtype("complex64"))
+    self.assertIs(tf.complex128, tf.as_dtype("complex128"))
     self.assertIs(tf.int64, tf.as_dtype("int64"))
     self.assertIs(tf.bool, tf.as_dtype("bool"))
     self.assertIs(tf.qint8, tf.as_dtype("qint8"))
@@ -107,6 +110,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertIs(tf.int8_ref, tf.as_dtype("int8_ref"))
     self.assertIs(tf.string_ref, tf.as_dtype("string_ref"))
     self.assertIs(tf.complex64_ref, tf.as_dtype("complex64_ref"))
+    self.assertIs(tf.complex128_ref, tf.as_dtype("complex128_ref"))
     self.assertIs(tf.int64_ref, tf.as_dtype("int64_ref"))
     self.assertIs(tf.bool_ref, tf.as_dtype("bool_ref"))
     self.assertIs(tf.qint8_ref, tf.as_dtype("qint8_ref"))
@@ -135,6 +139,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertEqual(tf.as_dtype("uint8").is_integer, True)
     self.assertEqual(tf.as_dtype("uint16").is_integer, True)
     self.assertEqual(tf.as_dtype("complex64").is_integer, False)
+    self.assertEqual(tf.as_dtype("complex128").is_integer, False)
     self.assertEqual(tf.as_dtype("float").is_integer, False)
     self.assertEqual(tf.as_dtype("double").is_integer, False)
     self.assertEqual(tf.as_dtype("string").is_integer, False)
@@ -148,6 +153,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertEqual(tf.as_dtype("uint8").is_floating, False)
     self.assertEqual(tf.as_dtype("uint16").is_floating, False)
     self.assertEqual(tf.as_dtype("complex64").is_floating, False)
+    self.assertEqual(tf.as_dtype("complex128").is_floating, False)
     self.assertEqual(tf.as_dtype("float32").is_floating, True)
     self.assertEqual(tf.as_dtype("float64").is_floating, True)
     self.assertEqual(tf.as_dtype("string").is_floating, False)
@@ -161,6 +167,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertEqual(tf.as_dtype("uint8").is_complex, False)
     self.assertEqual(tf.as_dtype("uint16").is_complex, False)
     self.assertEqual(tf.as_dtype("complex64").is_complex, True)
+    self.assertEqual(tf.as_dtype("complex128").is_complex, True)
     self.assertEqual(tf.as_dtype("float32").is_complex, False)
     self.assertEqual(tf.as_dtype("float64").is_complex, False)
     self.assertEqual(tf.as_dtype("string").is_complex, False)
@@ -178,6 +185,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertEqual(tf.as_dtype("bool").is_unsigned, False)
     self.assertEqual(tf.as_dtype("string").is_unsigned, False)
     self.assertEqual(tf.as_dtype("complex64").is_unsigned, False)
+    self.assertEqual(tf.as_dtype("complex128").is_unsigned, False)

   def testMinMax(self):
     # make sure min/max evaluates for all data types that have min/max
@@ -192,7 +200,8 @@ class TypesTest(test_util.TensorFlowTestCase):
       if (dtype.is_quantized or
           dtype.base_dtype == tf.bool or
           dtype.base_dtype == tf.string or
-          dtype.base_dtype == tf.complex64):
+          dtype.base_dtype == tf.complex64 or
+          dtype.base_dtype == tf.complex128):
         continue
       print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
diff --git a/tensorflow/python/framework/ops_test.py b/tensorflow/python/framework/ops_test.py
index cfc96a0cc80..afa5c4812df 100644
--- a/tensorflow/python/framework/ops_test.py
+++ b/tensorflow/python/framework/ops_test.py
@@ -1289,7 +1289,7 @@ class ColocationGroupTest(test_util.TensorFlowTestCase):
     with ops.colocate_with(a.op):
       with ops.colocate_with(b.op, ignore_existing=True):
         c = constant_op.constant(4.0)
-    self.assertEqual(set(["loc:@b"]), set(c.op.colocation_groups()))
+    self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))

   def testColocateVariables(self):
     a = variables.Variable([2.0], name="a")
diff --git a/tensorflow/python/framework/tensor_util.py b/tensorflow/python/framework/tensor_util.py
index b1b39f0651d..7a9add319a6 100644
--- a/tensorflow/python/framework/tensor_util.py
+++ b/tensorflow/python/framework/tensor_util.py
@@ -76,11 +76,16 @@ else:
   def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])

-  def SlowAppendComplexArrayToTensorProto(tensor_proto, proto_values):
+  def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.scomplex_val.extend([np.asscalar(v)
                                       for x in proto_values
                                       for v in [x.real, x.imag]])

+  def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
+    tensor_proto.dcomplex_val.extend([np.asscalar(v)
+                                      for x in proto_values
+                                      for v in [x.real, x.imag]])
+
   def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])

@@ -96,8 +101,8 @@ else:
       np.uint16: SlowAppendIntArrayToTensorProto,
       np.int16: SlowAppendIntArrayToTensorProto,
       np.int8: SlowAppendIntArrayToTensorProto,
-      np.complex64: SlowAppendComplexArrayToTensorProto,
-      np.complex128: SlowAppendComplexArrayToTensorProto,
+      np.complex64: SlowAppendComplex64ArrayToTensorProto,
+      np.complex128: SlowAppendComplex128ArrayToTensorProto,
       np.object: SlowAppendObjectArrayToTensorProto,
       np.bool: SlowAppendBoolArrayToTensorProto,
       dtypes.qint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
@@ -240,6 +245,7 @@ _TF_TO_IS_OK = {
    dtypes.int8: _FilterInt,
    dtypes.string: _FilterStr,
    dtypes.complex64: _FilterComplex,
+    dtypes.complex128: _FilterComplex,
    dtypes.int64: _FilterInt,
    dtypes.bool: _FilterBool,
    dtypes.qint32: _FilterInt,
@@ -453,6 +459,15 @@ def MakeNdarray(tensor):
     else:
       return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                       dtype=dtype).reshape(shape)
+  elif tensor_dtype == dtypes.complex128:
+    it = iter(tensor.dcomplex_val)
+    if len(tensor.dcomplex_val) == 2:
+      return np.repeat(np.array(complex(tensor.dcomplex_val[0],
+                                        tensor.dcomplex_val[1]), dtype=dtype),
+                       num_elements).reshape(shape)
+    else:
+      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
+                      dtype=dtype).reshape(shape)
   elif tensor_dtype == dtypes.bool:
     if len(tensor.bool_val) == 1:
       return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
diff --git a/tensorflow/python/framework/tensor_util_test.py b/tensorflow/python/framework/tensor_util_test.py
index a2c28f0078f..d1cec3e0613 100644
--- a/tensorflow/python/framework/tensor_util_test.py
+++ b/tensorflow/python/framework/tensor_util_test.py
@@ -274,7 +274,7 @@ class TensorUtilTest(tf.test.TestCase):
     self.assertEquals(np.object, a.dtype)
     self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)

-  def testComplex(self):
+  def testComplex64(self):
     t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex64)
     self.assertProtoEquals("""
       dtype: DT_COMPLEX64
@@ -286,16 +286,30 @@ class TensorUtilTest(tf.test.TestCase):
     self.assertEquals(np.complex64, a.dtype)
     self.assertAllEqual(np.array(1 + 2j), a)

-  def testComplexWithImplicitRepeat(self):
-    t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
-                                      dtype=tf.complex64)
+  def testComplex128(self):
+    t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex128)
+    self.assertProtoEquals("""
+      dtype: DT_COMPLEX128
+      tensor_shape {}
+      dcomplex_val: 1
+      dcomplex_val: 2
+      """, t)
     a = tensor_util.MakeNdarray(t)
-    self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
-                                  [(1+1j), (1+1j), (1+1j), (1+1j)],
-                                  [(1+1j), (1+1j), (1+1j), (1+1j)]],
-                                 dtype=np.complex64), a)
+    self.assertEquals(np.complex128, a.dtype)
+    self.assertAllEqual(np.array(1 + 2j), a)

-  def testComplexN(self):
+  def testComplexWithImplicitRepeat(self):
+    for dtype, np_dtype in [(tf.complex64, np.complex64),
+                            (tf.complex128, np.complex128)]:
+      t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
+                                        dtype=dtype)
+      a = tensor_util.MakeNdarray(t)
+      self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
+                                    [(1+1j), (1+1j), (1+1j), (1+1j)],
+                                    [(1+1j), (1+1j), (1+1j), (1+1j)]],
+                                   dtype=np_dtype), a)
+
+  def testComplex64N(self):
     t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
                                       dtype=tf.complex64)
     self.assertProtoEquals("""
@@ -312,7 +326,24 @@ class TensorUtilTest(tf.test.TestCase):
     self.assertEquals(np.complex64, a.dtype)
     self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)

-  def testComplexNpArray(self):
+  def testComplex128N(self):
+    t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
+                                      dtype=tf.complex128)
+    self.assertProtoEquals("""
+      dtype: DT_COMPLEX128
+      tensor_shape { dim { size: 1 } dim { size: 3 } }
+      dcomplex_val: 1
+      dcomplex_val: 2
+      dcomplex_val: 3
+      dcomplex_val: 4
+      dcomplex_val: 5
+      dcomplex_val: 6
+      """, t)
+    a = tensor_util.MakeNdarray(t)
+    self.assertEquals(np.complex128, a.dtype)
+    self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
+
+  def testComplex64NpArray(self):
     t = tensor_util.make_tensor_proto(
         np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64)
     # scomplex_val are real_0, imag_0, real_1, imag_1, ...
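
For reference, the helpers above fix the wire format for complex tensors: each
value is flattened into an interleaved list (real_0, imag_0, real_1, imag_1,
...) stored in scomplex_val for complex64 and dcomplex_val for complex128, and
MakeNdarray pairs the entries back up with the zip(it, it) idiom. Below is a
minimal standalone sketch of that round trip in plain numpy; encode_complex and
decode_complex are illustrative names, not part of the patch.

import numpy as np

def encode_complex(values):
  # Interleave real and imaginary parts, as the
  # SlowAppendComplex{64,128}ArrayToTensorProto helpers above do.
  return [part for v in values for part in (v.real, v.imag)]

def decode_complex(flat, dtype=np.complex128):
  # zip(it, it) over a single shared iterator yields consecutive
  # (real, imag) pairs, mirroring the MakeNdarray branches above.
  it = iter(flat)
  return np.array([complex(r, i) for r, i in zip(it, it)], dtype=dtype)

flat = encode_complex(np.array([1+2j, 3+4j], dtype=np.complex128))
assert flat == [1.0, 2.0, 3.0, 4.0]
assert (decode_complex(flat) == np.array([1+2j, 3+4j])).all()
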
@@ -332,6 +363,26 @@ class TensorUtilTest(tf.test.TestCase):
     self.assertEquals(np.complex64, a.dtype)
     self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)

+  def testComplex128NpArray(self):
+    t = tensor_util.make_tensor_proto(
+        np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex128)
+    # dcomplex_val are real_0, imag_0, real_1, imag_1, ...
+    self.assertProtoEquals("""
+      dtype: DT_COMPLEX128
+      tensor_shape { dim { size: 2 } dim { size: 2 } }
+      dcomplex_val: 1
+      dcomplex_val: 2
+      dcomplex_val: 3
+      dcomplex_val: 4
+      dcomplex_val: 5
+      dcomplex_val: 6
+      dcomplex_val: 7
+      dcomplex_val: 8
+      """, t)
+    a = tensor_util.MakeNdarray(t)
+    self.assertEquals(np.complex128, a.dtype)
+    self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
+
   def testUnsupportedDType(self):
     with self.assertRaises(TypeError):
       tensor_util.make_tensor_proto(np.array([1]), 0)
diff --git a/tensorflow/python/lib/core/py_func.cc b/tensorflow/python/lib/core/py_func.cc
index 5701e8fe70b..1949913ec81 100644
--- a/tensorflow/python/lib/core/py_func.cc
+++ b/tensorflow/python/lib/core/py_func.cc
@@ -99,6 +99,9 @@ Status TfDTypeToNpDType(const DataType& tf, int* np) {
     case DT_COMPLEX64:
       *np = NPY_COMPLEX64;
       break;
+    case DT_COMPLEX128:
+      *np = NPY_COMPLEX128;
+      break;
     case DT_STRING:
       *np = NPY_OBJECT;
       break;
@@ -210,6 +213,9 @@ Status NumericNpDTypeToTfDType(const int np, DataType* tf) {
     case NPY_COMPLEX64:
       *tf = DT_COMPLEX64;
       break;
+    case NPY_COMPLEX128:
+      *tf = DT_COMPLEX128;
+      break;
     default:
       return errors::Unimplemented("Unsupported numpy type ", np);
   }
diff --git a/tensorflow/python/training/session_manager.py b/tensorflow/python/training/session_manager.py
index 08fb65db6d3..9e5b9a17209 100644
--- a/tensorflow/python/training/session_manager.py
+++ b/tensorflow/python/training/session_manager.py
@@ -326,7 +326,7 @@ class SessionManager(object):
     try:
       sess.run(self._ready_op)
       return None
-    except errors.FailedPreconditionError, e:
+    except errors.FailedPreconditionError as e:
       if "uninitialized" not in str(e):
         logging.warning("Model not ready raised: %s", str(e))
         raise e
diff --git a/tensorflow/tools/ci_build/Dockerfile.cpu b/tensorflow/tools/ci_build/Dockerfile.cpu
index acc84f136a8..369daa9dcfb 100644
--- a/tensorflow/tools/ci_build/Dockerfile.cpu
+++ b/tensorflow/tools/ci_build/Dockerfile.cpu
@@ -7,6 +7,7 @@ COPY install/*.sh /install/
 RUN /install/install_bootstrap_deb_packages.sh
 RUN add-apt-repository -y ppa:openjdk-r/ppa
 RUN /install/install_deb_packages.sh
+RUN /install/install_pip_packages.sh
 RUN /install/install_bazel.sh

 # Set up bazelrc.
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu b/tensorflow/tools/ci_build/Dockerfile.gpu
index b4b0ccccf75..81cc4c9f416 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu
@@ -7,6 +7,7 @@ COPY install/*.sh /install/
 RUN /install/install_bootstrap_deb_packages.sh
 RUN add-apt-repository -y ppa:openjdk-r/ppa
 RUN /install/install_deb_packages.sh
+RUN /install/install_pip_packages.sh
 RUN /install/install_bazel.sh

 # Set up bazelrc.
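
Taken together with the dtype registration, the tests and the py_func.cc
mappings above imply that a complex128 value should round-trip through the
public tensor-proto helpers once this patch is applied. A short usage sketch
(the imports match those used by tensor_util_test.py; the snippet itself is
illustrative and not part of the patch):

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_util

t = tensor_util.make_tensor_proto(
    np.array([[1+2j, 3+4j]]), dtype=tf.complex128)
# t.dcomplex_val now holds the interleaved parts [1.0, 2.0, 3.0, 4.0].
a = tensor_util.MakeNdarray(t)
assert a.dtype == np.complex128
assert (a == np.array([[1+2j, 3+4j]])).all()
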
diff --git a/tensorflow/tools/ci_build/builds/pip.sh b/tensorflow/tools/ci_build/builds/pip.sh
index 16364fbf9ee..7255de0bccf 100755
--- a/tensorflow/tools/ci_build/builds/pip.sh
+++ b/tensorflow/tools/ci_build/builds/pip.sh
@@ -22,11 +22,20 @@
 #   pip.sh CONTAINER_TYPE [--test_tutorials]
 #
 # When executing the Python unit tests, the script obeys the shell
-# variables: TF_BUILD_BAZEL_CLEAN, NO_TEST_ON_INSTALL
+# variables: TF_BUILD_BAZEL_CLEAN, TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES,
+# TF_BUILD_NO_CACHING_VIRTUALENV, NO_TEST_ON_INSTALL
 #
 # TF_BUILD_BAZEL_CLEAN, if set to any non-empty and non-0 value, directs the
 # script to perform bazel clean prior to main build and test steps.
 #
+# TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES overrides the default extra pip packages
+# to be installed in the virtualenv before test_installation.sh is called.
+# Multiple package names are separated with spaces.
+#
+# TF_BUILD_NO_CACHING_VIRTUALENV: If set to any non-empty and non-0 value,
+# causes the script to force-remove any existing (cached) virtualenv
+# directory.
+#
 # If NO_TEST_ON_INSTALL has any non-empty and non-0 value, the test-on-install
 # part will be skipped.
 #
@@ -35,6 +44,8 @@
 # installation and the Python unit tests-on-install step.
 #

+INSTALL_EXTRA_PIP_PACKAGES=${TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES}
+
 # Helper functions
 # Get the absolute path from a path
 abs_path() {
@@ -111,7 +122,7 @@ PIP_WHL_DIR="${PIP_TEST_ROOT}/whl"
 PIP_WHL_DIR=$(abs_path ${PIP_WHL_DIR})  # Get absolute path
 rm -rf ${PIP_WHL_DIR} && mkdir -p ${PIP_WHL_DIR}
 bazel-bin/tensorflow/tools/pip_package/build_pip_package ${PIP_WHL_DIR} || \
-die "build_pip_package FAILED"
+    die "build_pip_package FAILED"

 # Perform installation
 WHL_PATH=$(ls ${PIP_WHL_DIR}/tensorflow*.whl)
@@ -125,27 +136,46 @@ echo "whl file path = ${WHL_PATH}"
 # Install, in user's local home folder
 echo "Installing pip whl file: ${WHL_PATH}"

-# Create temporary directory for install test
+# Create virtualenv directory for install test
 VENV_DIR="${PIP_TEST_ROOT}/venv"
-rm -rf "${VENV_DIR}" && mkdir -p "${VENV_DIR}"
-echo "Create directory for virtualenv: ${VENV_DIR}"
+if [[ -d "${VENV_DIR}" ]] &&
+   [[ ! -z "${TF_BUILD_NO_CACHING_VIRTUALENV}" ]] &&
+   [[ "${TF_BUILD_NO_CACHING_VIRTUALENV}" != "0" ]]; then
+  echo "TF_BUILD_NO_CACHING_VIRTUALENV=${TF_BUILD_NO_CACHING_VIRTUALENV}:"
+  echo "Removing existing virtualenv directory: ${VENV_DIR}"
+
+  rm -rf "${VENV_DIR}" || \
+      die "Failed to remove existing virtualenv directory: ${VENV_DIR}"
+fi
+
+mkdir -p ${VENV_DIR} || \
+    die "FAILED to create virtualenv directory: ${VENV_DIR}"

 # Verify that virtualenv exists
 if [[ -z $(which virtualenv) ]]; then
   die "FAILED: virtualenv not available on path"
 fi

-virtualenv -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" ||
-die "FAILED: Unable to create virtualenv"
+virtualenv --system-site-packages -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" || \
+    die "FAILED: Unable to create virtualenv"
+
+source "${VENV_DIR}/bin/activate" || \
+    die "FAILED: Unable to activate virtualenv"

-source "${VENV_DIR}/bin/activate" ||
-die "FAILED: Unable to activate virtualenv"
 # Install the pip file in virtual env
-pip install -v ${WHL_PATH} \
+pip install -v --force-reinstall ${WHL_PATH} \
 && echo "Successfully installed pip package ${WHL_PATH}" \
 || die "pip install (without --upgrade) FAILED"

+# Install extra pip packages required by the test-on-install
+for PACKAGE in ${INSTALL_EXTRA_PIP_PACKAGES}; do
+  echo "Installing extra pip package required by test-on-install: ${PACKAGE}"
+
+  pip install ${PACKAGE} || \
+      die "pip install ${PACKAGE} FAILED"
+done
+
 # If NO_TEST_ON_INSTALL is set to any non-empty value, skip all Python
 # tests-on-install and exit right away
 if [[ ! -z "${NO_TEST_ON_INSTALL}" ]] &&
@@ -158,14 +188,14 @@ fi

 # Call test_installation.sh to perform test-on-install
 DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-"${DIR}/test_installation.sh" --virtualenv ||
-die "PIP tests-on-install FAILED"
+"${DIR}/test_installation.sh" --virtualenv || \
+    die "PIP tests-on-install FAILED"

 # Optional: Run the tutorial tests
 if [[ "${DO_TEST_TUTORIALS}" == "1" ]]; then
-  "${DIR}/test_tutorials.sh" --virtualenv ||
-die "PIP tutorial tests-on-install FAILED"
+  "${DIR}/test_tutorials.sh" --virtualenv || \
+      die "PIP tutorial tests-on-install FAILED"
 fi

-deactivate ||
-die "FAILED: Unable to deactivate virtualenv"
+deactivate || \
+    die "FAILED: Unable to deactivate virtualenv"
diff --git a/tensorflow/tools/ci_build/builds/test_installation.sh b/tensorflow/tools/ci_build/builds/test_installation.sh
index d2c8d21c5bd..8fa9b481f64 100755
--- a/tensorflow/tools/ci_build/builds/test_installation.sh
+++ b/tensorflow/tools/ci_build/builds/test_installation.sh
@@ -166,7 +166,8 @@ cp -r tensorflow/core/lib/png ${PY_TEST_DIR}/tensorflow/core/lib

 # Run tests
 DIR0=$(pwd)
-ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} -name "*_test.py" | sort)
+ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} \
+    -type f \( -name "*_test.py" -o -name "test_*.py" \) | sort)
 # TODO(cais): Add tests in tensorflow/contrib
 PY_TEST_COUNT=$(echo ${ALL_PY_TESTS} | wc -w)
diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh
index 46c1740af61..9b7e5abd621 100755
--- a/tensorflow/tools/ci_build/ci_parameterized_build.sh
+++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh
@@ -306,7 +306,7 @@ if [[ "${DO_DOCKER}" == "1" ]]; then
 fi

 # Write to the tmp script
-echo "#!/bin/bash" > ${TMP_SCRIPT}
+echo "#!/usr/bin/env bash" > ${TMP_SCRIPT}
 if [[ ! -z "${TF_BUILD_BAZEL_CLEAN}" ]] &&
    [[ "${TF_BUILD_BAZEL_CLEAN}" != "0" ]]; then
   echo ${BAZEL_CLEAN_CMD} >> ${TMP_SCRIPT}
diff --git a/tensorflow/tools/ci_build/install/install_deb_packages.sh b/tensorflow/tools/ci_build/install/install_deb_packages.sh
index b752e86d690..1bf77b236c2 100755
--- a/tensorflow/tools/ci_build/install/install_deb_packages.sh
+++ b/tensorflow/tools/ci_build/install/install_deb_packages.sh
@@ -29,10 +29,12 @@ apt-get install -y \
     python-dev \
     python-numpy \
     python-pip \
+    python-scipy \
     python-virtualenv \
     python3-dev \
     python3-numpy \
     python3-pip \
+    python3-scipy \
     sudo \
     swig \
     unzip \
diff --git a/tensorflow/tools/ci_build/install/install_pip_packages.sh b/tensorflow/tools/ci_build/install/install_pip_packages.sh
new file mode 100755
index 00000000000..39583869e20
--- /dev/null
+++ b/tensorflow/tools/ci_build/install/install_pip_packages.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+set -e
+
+pip install scikit-learn
+pip3 install scikit-learn
diff --git a/tensorflow/tools/docker/docker_run_gpu.sh b/tensorflow/tools/docker/docker_run_gpu.sh
index 9ebfa701e4c..ead05f9150f 100755
--- a/tensorflow/tools/docker/docker_run_gpu.sh
+++ b/tensorflow/tools/docker/docker_run_gpu.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/docker/run_jupyter.sh b/tensorflow/tools/docker/run_jupyter.sh
index ba2f3a33262..eb69d62c073 100755
--- a/tensorflow/tools/docker/run_jupyter.sh
+++ b/tensorflow/tools/docker/run_jupyter.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/docs/gen_docs.sh b/tensorflow/tools/docs/gen_docs.sh
index 95c0092d4ac..de507fcd000 100755
--- a/tensorflow/tools/docs/gen_docs.sh
+++ b/tensorflow/tools/docs/gen_docs.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/docs/gen_docs_test.sh b/tensorflow/tools/docs/gen_docs_test.sh
index 2f905c8e47f..9375784dc23 100755
--- a/tensorflow/tools/docs/gen_docs_test.sh
+++ b/tensorflow/tools/docs/gen_docs_test.sh
@@ -1,4 +1,4 @@
-#!/bin/bash -eux
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,6 +14,8 @@
 # limitations under the License.
 # ==============================================================================

+set -eux
+
 TFDIR=$TEST_SRCDIR/tensorflow
 DOXYGEN=doxygen
 DOXYGEN_CONFIG="tf-doxy_for_md-config"
diff --git a/tensorflow/tools/pip_package/build_pip_package.sh b/tensorflow/tools/pip_package/build_pip_package.sh
index 6b4e50490bb..1ae6926b676 100755
--- a/tensorflow/tools/pip_package/build_pip_package.sh
+++ b/tensorflow/tools/pip_package/build_pip_package.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/swig/swig.sh b/tensorflow/tools/swig/swig.sh
index 0601703b011..c35b2ee3634 100755
--- a/tensorflow/tools/swig/swig.sh
+++ b/tensorflow/tools/swig/swig.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tensorflow/tools/test/BUILD b/tensorflow/tools/test/BUILD
index a686bbee4ca..df2a0b45554 100644
--- a/tensorflow/tools/test/BUILD
+++ b/tensorflow/tools/test/BUILD
@@ -53,23 +53,21 @@ py_binary(

 # Unit test that calls run_and_gather_logs on a benchmark, and
 # prints the result.
-cuda_py_test(
-    name = "run_and_gather_logs_test",
-    srcs = ["run_and_gather_logs.py"],
-    additional_deps = [
-        ":run_and_gather_logs",
-    ],
-    args = [
-        "--test_name=" + "//tensorflow/core/kernels:cast_op_test",
-        "--test_args=" + "'--benchmarks=BM_cpu_float_bfloat16'",
-        "--compilation_mode='$(COMPILATION_MODE)'",
-        "--cc_flags='$(CC_FLAGS)'",
-    ],
-    data = [
-        "//tensorflow/core/kernels:cast_op_test",
-    ],
-    main = "run_and_gather_logs.py",
-)
+#cuda_py_test(
+#    name = "run_and_gather_logs_test",
+#    srcs = ["run_and_gather_logs.py"],
+#    additional_deps = [
+#        ":run_and_gather_logs",
+#    ],
+#    args = [
+#        "--test_name=" + "//tensorflow/core/kernels:cast_op_test",
+#        "--test_args=" + "'--benchmarks=BM_cpu_float'",
+#    ],
+#    data = [
+#        "//tensorflow/core/kernels:cast_op_test",
+#    ],
+#    main = "run_and_gather_logs.py",
+#)

 filegroup(
     name = "all_files",
diff --git a/third_party/gpus/cuda/cuda_config.sh b/third_party/gpus/cuda/cuda_config.sh
index 42cd254644b..651e5ae0317 100755
--- a/third_party/gpus/cuda/cuda_config.sh
+++ b/third_party/gpus/cuda/cuda_config.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/util/python/python_config.sh b/util/python/python_config.sh
index a5666c2f7ee..83e38566906 100755
--- a/util/python/python_config.sh
+++ b/util/python/python_config.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
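
Finally, as an end-to-end sanity check of the Python-side complex128 plumbing
in this patch, a graph-level round trip along the following lines should pass
once the patch is built. This is a hedged sketch, assuming the Const kernel
registration reached by tf.constant covers complex128 on CPU; it is not part
of the patch itself.

import numpy as np
import tensorflow as tf

with tf.Session() as sess:
  # tf.constant routes through make_tensor_proto, so an np.complex128 array
  # should now yield a DT_COMPLEX128 constant.
  c = tf.constant(np.array([1+2j, 3-4j], dtype=np.complex128))
  assert c.dtype == tf.complex128
  assert (sess.run(c) == np.array([1+2j, 3-4j])).all()
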