Rollforward of "Merge changes from github."
Change: 117375570
This commit is contained in:
parent
f1bccd320a
commit
4c85a08666
configure
tensorflow
cc/ops
contrib/linear_optimizer/python/ops
core
framework
allocator.h
node_def_util_test.cc
numeric_types.h
op_def_builder_test.cc
op_def_util_test.cc
register_types.h
tensor.cc
tensor.proto
tensor_test.cc
tensor_testutil.h
types.cc
types.h
types.proto
ops
public
util
examples
models/image/cifar10
python
tools
third_party/gpus/cuda
util/python
configure (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 ## Set up python-related environment settings
 while true; do
@@ -87,6 +87,9 @@ DEFINE_CONST(bool, bool_val);
 DEFINE_CONST_IMPL(complex64, proto.add_scomplex_val(t.begin()->real());
                   proto.add_scomplex_val(t.begin()->imag()););
 
+DEFINE_CONST_IMPL(complex128, proto.add_dcomplex_val(t.begin()->real());
+                  proto.add_dcomplex_val(t.begin()->imag()););
+
 Node* Const(StringPiece s, const GraphDefBuilder::Options& options) {
   if (options.HaveError()) return nullptr;
   NodeBuilder node_builder(options.GetNameForOp(OpName()), OpName(),
@@ -49,6 +49,7 @@ DECLARE_CONST(uint8);
 DECLARE_CONST(int16);
 DECLARE_CONST(int8);
 DECLARE_CONST(complex64);
+DECLARE_CONST(complex128);
 DECLARE_CONST(int64);
 DECLARE_CONST(bool);
@@ -21,6 +21,8 @@ import os.path
 import threading
 import uuid
 
+from six.moves import range  # pylint: disable=redefined-builtin
+
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework.load_library import load_op_library
@@ -223,7 +225,7 @@ class SdcaModel(object):
       dense_features = self._convert_n_to_tensor(examples['dense_features'])
       dense_variables = self._convert_n_to_tensor(self._variables[
           'dense_features_weights'])
-      for i in xrange(len(dense_variables)):
+      for i in range(len(dense_variables)):
         predictions += dense_features[i] * dense_variables[i]
     return predictions
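The `xrange` → `range` swap above works because `six.moves` exposes one name that maps to the lazy `xrange` on Python 2 and the built-in `range` on Python 3. A minimal sketch of the pattern (illustrative only):

```python
from six.moves import range  # pylint: disable=redefined-builtin

# Lazy xrange on Python 2, built-in range on Python 3 -- same loop either way.
for i in range(3):
    print(i)  # 0, 1, 2
```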
@@ -187,13 +187,14 @@ class Allocator {
 
 // is_simple<T>::value if T[] can be safely constructed and destructed
 // without running T() and ~T().  We do not use std::is_trivial<T>
-// directly because std::complex<float> is not trival but its array
-// can be constructed and destructed without running its default ctor
-// and dtor.
+// directly because std::complex<float> and std::complex<double> are
+// not trival, but their arrays can be constructed and destructed
+// without running their default ctors and dtors.
 template <typename T>
 struct is_simple {
   static const bool value = std::is_trivial<T>::value ||
                             std::is_same<T, complex64>::value ||
+                            std::is_same<T, complex128>::value ||
                             is_quantized<T>::value;
 };
@@ -151,7 +151,7 @@ TEST(NodeDefUtilTest, Out) {
   ExpectFailure(bad, op,
                 "Value for attr 'T' of string is not in the list of allowed "
                 "values: float, double, int64, int32, uint8, uint16, int16, "
-                "int8, complex64, qint8, quint8, qint32");
+                "int8, complex64, complex128, qint8, quint8, qint32");
 }
 
 TEST(NodeDefUtilTest, Enum) {
@@ -24,6 +24,8 @@ namespace tensorflow {
 
 // Single precision complex.
 typedef std::complex<float> complex64;
+// Double precision complex.
+typedef std::complex<double> complex128;
 
 }  // end namespace tensorflow
@@ -113,7 +113,7 @@ TEST_F(OpDefBuilderTest, AttrWithRestrictions) {
   ExpectSuccess(b().Attr("a:numbertype"),
                 "attr: { name: 'a' type: 'type' allowed_values { list { type: "
                 "[DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_INT16, "
-                "DT_UINT16, DT_INT8, DT_COMPLEX64, DT_QINT8, DT_QUINT8, "
+                "DT_UINT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, "
                 "DT_QINT32] } } }");
   ExpectSuccess(b().Attr("a:realnumbertype"),
                 "attr: { name: 'a' type: 'type' allowed_values { list { type: "
@@ -246,6 +246,10 @@ TEST_F(ValidateOpDefTest, BadAttrAllowed) {
       TestBuilder(OpDefBuilder("BadAttrtude")
                       .Attr("x: list(realnumbertype) = [DT_COMPLEX64]")),
       "attr 'x' of complex64 is not in the list of allowed values");
+  ExpectFailure(
+      TestBuilder(OpDefBuilder("BadAttrtude")
+                      .Attr("x: list(realnumbertype) = [DT_COMPLEX128]")),
+      "attr 'x' of complex128 is not in the list of allowed values");
   // Is in list of allowed strings.
   TF_EXPECT_OK(TestBuilder(
       OpDefBuilder("GoodAttrtude").Attr("x: {'foo', 'bar'} = 'bar'")));
@@ -63,14 +63,16 @@ limitations under the License.
   m(int16);                   \
   m(int8)
 
-// Call "m" for all number types, including complex64.
+// Call "m" for all number types, including complex64 and complex128.
 #define TF_CALL_NUMBER_TYPES(m) \
   TF_CALL_REAL_NUMBER_TYPES(m); \
-  m(complex64)
+  m(complex64);                 \
+  m(complex128)
 
 #define TF_CALL_NUMBER_TYPES_NO_INT32(m) \
   TF_CALL_REAL_NUMBER_TYPES_NO_INT32(m); \
-  m(complex64)
+  m(complex64);                          \
+  m(complex128)
 
 #define TF_CALL_POD_TYPES(m) \
   TF_CALL_NUMBER_TYPES(m);   \
@@ -215,6 +215,22 @@ struct ProtoHelper<complex64> {
   }
 };
 
+template <>
+struct ProtoHelper<complex128> {
+  typedef Helper<double>::RepeatedFieldType FieldType;
+  static const complex128* Begin(const TensorProto& proto) {
+    return reinterpret_cast<const complex128*>(proto.dcomplex_val().data());
+  }
+  static size_t NumElements(const TensorProto& proto) {
+    return proto.dcomplex_val().size() / 2;
+  }
+  static void Fill(const complex128* data, size_t n, TensorProto* proto) {
+    const double* p = reinterpret_cast<const double*>(data);
+    FieldType copy(p, p + n * 2);
+    proto->mutable_dcomplex_val()->Swap(&copy);
+  }
+};
+
 template <>
 struct ProtoHelper<qint32> {
   typedef Helper<int32>::RepeatedFieldType FieldType;
@@ -385,6 +401,7 @@ void Tensor::UnsafeCopyFromInternal(const Tensor& other,
   CASE(int8, SINGLE_ARG(STMTS))       \
   CASE(string, SINGLE_ARG(STMTS))     \
   CASE(complex64, SINGLE_ARG(STMTS))  \
+  CASE(complex128, SINGLE_ARG(STMTS)) \
   CASE(int64, SINGLE_ARG(STMTS))      \
   CASE(bool, SINGLE_ARG(STMTS))       \
   CASE(qint32, SINGLE_ARG(STMTS))     \
@@ -57,4 +57,8 @@ message TensorProto {
 
   // DT_BOOL
   repeated bool bool_val = 11 [packed = true];
+
+  // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real
+  // and imaginary parts of i-th double precision complex.
+  repeated double dcomplex_val = 12 [packed = true];
 };
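The `dcomplex_val` field stores each complex number as two consecutive doubles. A minimal numpy sketch of this interleaved layout (illustrative only, not TensorFlow code):

```python
import numpy as np

values = np.array([1 + 2j, 3 + 4j], dtype=np.complex128)

# Pack: a complex128 buffer viewed as float64 is exactly the
# [real_0, imag_0, real_1, imag_1, ...] layout used by dcomplex_val.
dcomplex_val = values.view(np.float64)       # [1., 2., 3., 4.]

# Unpack: re-pair consecutive (real, imag) doubles into complex numbers.
restored = dcomplex_val.view(np.complex128)  # [1.+2.j, 3.+4.j]
assert np.array_equal(values, restored)
```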
@@ -47,12 +47,17 @@ TEST(TensorTest, DataType_Traits) {
 
   // Unfortunately. std::complex::complex() initializes (0, 0).
   EXPECT_FALSE(std::is_trivial<complex64>::value);
-  EXPECT_FALSE(std::is_trivial<std::complex<double>>::value);
+  EXPECT_FALSE(std::is_trivial<complex128>::value);
   EXPECT_TRUE(std::is_trivial<float[2]>::value);
-  struct MyComplex {
+  EXPECT_TRUE(std::is_trivial<double[2]>::value);
+  struct MyComplex64 {
     float re, im;
   };
-  EXPECT_TRUE(std::is_trivial<MyComplex>::value);
+  EXPECT_TRUE(std::is_trivial<MyComplex64>::value);
+  struct MyComplex128 {
+    double re, im;
+  };
+  EXPECT_TRUE(std::is_trivial<MyComplex128>::value);
 }
 
 template <typename T>
@@ -420,13 +425,19 @@ TEST(Tensor_Bool, SimpleWithHelper) {
   test::ExpectTensorEqual<bool>(t1, t2);
 }
 
-TEST(Tensor_Complex, Simple) {
+TEST(Tensor_Complex, Simple64) {
   Tensor t(DT_COMPLEX64, {4, 5, 3, 7});
   t.flat<complex64>().setRandom();
   TestCopies<complex64>(t);
 }
 
-TEST(Tensor_Complex, SimpleWithHelper) {
+TEST(Tensor_Complex, Simple128) {
+  Tensor t(DT_COMPLEX128, {4, 5, 3, 7});
+  t.flat<complex128>().setRandom();
+  TestCopies<complex128>(t);
+}
+
+TEST(Tensor_Complex, SimpleWithHelper64) {
   {
     Tensor t1 = test::AsTensor<complex64>({0,
                                            {1, 1},
@@ -444,7 +455,7 @@ TEST(Tensor_Complex, SimpleWithHelper) {
     test::ExpectTensorEqual<complex64>(t2, t3);
   }
 
-  // Does some numeric operations for complex numbers.
+  // Does some numeric operations for complex64 numbers.
   {
     const float PI = std::acos(-1);
     const complex64 rotate_45 = std::polar(1.0f, PI / 4);
@@ -475,6 +486,55 @@ TEST(Tensor_Complex, SimpleWithHelper) {
   }
 }
 
+TEST(Tensor_Complex, SimpleWithHelper128) {
+  {
+    Tensor t1 = test::AsTensor<complex128>({0,
+                                            {1, 1},
+                                            complex128(2),
+                                            complex128(3, 3),
+                                            complex128(0, 4),
+                                            complex128(2, 5)},
+                                           {2, 3});
+    Tensor t2(t1.dtype(), t1.shape());
+    t2.flat<complex128>() = t1.flat<complex128>() * complex128(0, 2);
+    Tensor t3 = test::AsTensor<complex128>(
+        {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}},
+        // shape
+        {2, 3});
+    test::ExpectTensorEqual<complex128>(t2, t3);
+  }
+
+  // Does some numeric operations for complex128 numbers.
+  {
+    const double PI = std::acos(-1);
+    const complex128 rotate_45 = std::polar(1.0, PI / 4);
+
+    // x contains all the 8-th root of unity.
+    Tensor x(DT_COMPLEX128, TensorShape({8}));
+    for (int i = 0; i < 8; ++i) {
+      x.vec<complex128>()(i) = std::pow(rotate_45, i);
+    }
+
+    // Shift the roots by 45 degree.
+    Tensor y(DT_COMPLEX128, TensorShape({8}));
+    y.vec<complex128>() = x.vec<complex128>() * rotate_45;
+    Tensor y_expected(DT_COMPLEX128, TensorShape({8}));
+    for (int i = 0; i < 8; ++i) {
+      y_expected.vec<complex128>()(i) = std::pow(rotate_45, i + 1);
+    }
+    test::ExpectTensorNear<complex128>(y, y_expected, 1e-5);
+
+    // Raise roots to the power of 8.
+    Tensor z(DT_COMPLEX128, TensorShape({8}));
+    z.vec<complex128>() = x.vec<complex128>().pow(8);
+    Tensor z_expected(DT_COMPLEX128, TensorShape({8}));
+    for (int i = 0; i < 8; ++i) {
+      z_expected.vec<complex128>()(i) = 1;
+    }
+    test::ExpectTensorNear<complex128>(z, z_expected, 1e-5);
+  }
+}
+
 // On the alignment.
 //
 // As of 2015/8, tensorflow::Tensor allocates its buffer with 32-byte
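The roots-of-unity check in the new test has a compact numpy analog (illustrative only):

```python
import numpy as np

rotate_45 = np.exp(1j * np.pi / 4)      # same as std::polar(1.0, pi/4)
x = rotate_45 ** np.arange(8)           # all 8-th roots of unity

# Multiplying by rotate_45 shifts each root by 45 degrees.
assert np.allclose(x * rotate_45, rotate_45 ** (np.arange(8) + 1))

# Raising the roots to the 8th power collapses them all to 1.
assert np.allclose(x ** 8, np.ones(8))
```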
@@ -127,6 +127,12 @@ inline void ExpectEqual<complex64>(const complex64& a, const complex64& b) {
   EXPECT_FLOAT_EQ(a.imag(), b.imag()) << a << " vs. " << b;
 }
 
+template <>
+inline void ExpectEqual<complex128>(const complex128& a, const complex128& b) {
+  EXPECT_DOUBLE_EQ(a.real(), b.real()) << a << " vs. " << b;
+  EXPECT_DOUBLE_EQ(a.imag(), b.imag()) << a << " vs. " << b;
+}
+
 inline void AssertSameTypeDims(const Tensor& x, const Tensor& y) {
   ASSERT_EQ(x.dtype(), y.dtype());
   ASSERT_TRUE(x.IsSameSize(y))
@@ -64,6 +64,8 @@ string DataTypeString(DataType dtype) {
       return "string";
     case DT_COMPLEX64:
       return "complex64";
+    case DT_COMPLEX128:
+      return "complex128";
     case DT_INT64:
       return "int64";
     case DT_BOOL:
@@ -125,6 +127,9 @@ bool DataTypeFromString(StringPiece sp, DataType* dt) {
   } else if (sp == "complex64") {
     *dt = DT_COMPLEX64;
     return true;
+  } else if (sp == "complex128") {
+    *dt = DT_COMPLEX128;
+    return true;
   } else if (sp == "int64") {
     *dt = DT_INT64;
     return true;
@@ -165,9 +170,10 @@ string DataTypeSliceString(const DataTypeSlice types) {
 }
 
 DataTypeVector AllTypes() {
-  return {DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16, DT_UINT16,
-          DT_INT8, DT_STRING, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8,
-          DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32};
+  return {DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16,
+          DT_UINT16, DT_INT8, DT_STRING, DT_COMPLEX64, DT_COMPLEX128,
+          DT_INT64, DT_BOOL, DT_QINT8, DT_QUINT8, DT_QINT16,
+          DT_QUINT16, DT_QINT32};
 }
 
 #if !defined(__ANDROID__)
@@ -188,8 +194,9 @@ DataTypeVector RealAndQuantizedTypes() {
 }
 
 DataTypeVector NumberTypes() {
-  return {DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_UINT16,
-          DT_INT16, DT_INT8, DT_COMPLEX64, DT_QINT8, DT_QUINT8, DT_QINT32};
+  return {DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
+          DT_UINT16, DT_INT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128,
+          DT_QINT8, DT_QUINT8, DT_QINT32};
 }
 
 #else  // defined(__ANDROID__)
@@ -223,6 +230,7 @@ bool DataTypeCanUseMemcpy(DataType dt) {
     case DT_INT16:
     case DT_INT8:
    case DT_COMPLEX64:
+    case DT_COMPLEX128:
    case DT_INT64:
    case DT_BOOL:
    case DT_QINT8:
@@ -174,6 +174,7 @@ MATCH_TYPE_AND_ENUM(int16, DT_INT16);
 MATCH_TYPE_AND_ENUM(int8, DT_INT8);
 MATCH_TYPE_AND_ENUM(string, DT_STRING);
 MATCH_TYPE_AND_ENUM(complex64, DT_COMPLEX64);
+MATCH_TYPE_AND_ENUM(complex128, DT_COMPLEX128);
 MATCH_TYPE_AND_ENUM(int64, DT_INT64);
 MATCH_TYPE_AND_ENUM(bool, DT_BOOL);
 MATCH_TYPE_AND_ENUM(qint8, DT_QINT8);
@@ -30,10 +30,10 @@ enum DataType {
   DT_QINT16 = 15;     // Quantized int16
   DT_QUINT16 = 16;    // Quantized uint16
   DT_UINT16 = 17;
+  DT_COMPLEX128 = 18;  // Double-precision complex
 
   // TODO(josh11b): DT_GENERIC_PROTO = ??;
   // TODO(jeff,josh11b): DT_UINT64?  DT_UINT32?
-  // TODO(zhifengc): DT_COMPLEX128 (double-precision complex)?
 
   // Do not use!  These are only for parameters.  Every enum above
   // should have a corresponding value below (verified by types_test).
@@ -54,4 +54,5 @@ enum DataType {
   DT_QINT16_REF = 115;
   DT_QUINT16_REF = 116;
   DT_UINT16_REF = 117;
+  DT_COMPLEX128_REF = 118;
 }
(File diff suppressed because it is too large.)
@@ -102,6 +102,7 @@ op {
       type: DT_INT16
       type: DT_INT8
       type: DT_COMPLEX64
+      type: DT_COMPLEX128
       type: DT_QINT8
       type: DT_QUINT8
       type: DT_QINT32

The same one-line insertion of `type: DT_COMPLEX128` after `type: DT_COMPLEX64` is repeated in the allowed-type lists of the remaining hunks of this file, at old-file offsets 331, 423, 505, 560, 625, 706, 752, 789, 902, 952, 1378, 1463, 1538, 1587, 1641, 1674, 1694, 4320, 5109, 5419, 5520, 6420, 8132, 8197, 9146, 9243, 9324, and 10234.
@@ -78,7 +78,8 @@ typedef enum {
   TF_INT16 = 5,
   TF_INT8 = 6,
   TF_STRING = 7,
-  TF_COMPLEX = 8,    // Single-precision complex
+  TF_COMPLEX64 = 8,  // Single-precision complex
+  TF_COMPLEX = 8,    // Old identifier kept for API backwards compatibility
   TF_INT64 = 9,
   TF_BOOL = 10,
   TF_QINT8 = 11,     // Quantized int8
@@ -88,6 +89,7 @@ typedef enum {
   TF_QINT16 = 15,    // Quantized int16
   TF_QUINT16 = 16,   // Quantized uint16
   TF_UINT16 = 17,
+  TF_COMPLEX128 = 18,  // Double-precision complex
 } TF_DataType;
 
 // --------------------------------------------------------------------------
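Because `TF_COMPLEX64` reuses value 8 and `TF_COMPLEX128` takes the new value 18 (mirroring `DT_COMPLEX128 = 18` in types.proto above), previously serialized graphs keep their meaning. A small Python check of the corresponding dtype enums (assuming a TensorFlow build that includes this change):

```python
import tensorflow as tf

print(tf.complex64.as_datatype_enum)   # 8  -- unchanged by the rename
print(tf.complex128.as_datatype_enum)  # 18 -- the newly added value
```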
@@ -108,6 +108,7 @@ TENSOR_PROTO_EXTRACT_TYPE(bool, bool, bool);
 TENSOR_PROTO_EXTRACT_TYPE(float, float, float);
 TENSOR_PROTO_EXTRACT_TYPE(double, double, double);
 TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex64, scomplex, float);
+TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex128, dcomplex, double);
 TENSOR_PROTO_EXTRACT_TYPE(int32, int, int32);
 TENSOR_PROTO_EXTRACT_TYPE(int64, int64, int64);
 TENSOR_PROTO_EXTRACT_TYPE(uint8, int, int32);
@@ -19,7 +19,7 @@ installed on your system.
 3. The Android SDK and build tools may be obtained from:
    https://developer.android.com/tools/revisions/build-tools.html
 
-The Android entries in [`<workspace_root>/WORKSPACE`](../../WORKSPACE) must be
+The Android entries in [`<workspace_root>/WORKSPACE`](../../../WORKSPACE#L2-L13) must be
 uncommented with the paths filled in appropriately depending on where you
 installed the NDK and SDK. Otherwise an error such as:
 "The external label '//external:android/sdk' is not bound to anything" will
@@ -45,10 +45,8 @@ your workspace root:
 $ bazel build //tensorflow/examples/android:tensorflow_demo
 ```
 
-If you get build errors about protocol buffers then you may have left out the
-`--recurse-submodules` argument to `git clone`. Review the instructions
-here and then build again:
-https://www.tensorflow.org/versions/master/get_started/os_setup.html#clone-the-tensorflow-repository
+If you get build errors about protocol buffers, run
+`git submodule update --init` and build again.
 
 If adb debugging is enabled on your Android 5.0 or later device, you may then
 use the following command from your workspace root to install the APK once
@@ -43,15 +43,15 @@ This uses the default example image that ships with the framework, and should
 output something similar to this:
 
 ```
-I tensorflow/examples/label_image/main.cc:200] military uniform (866): 0.902268
-I tensorflow/examples/label_image/main.cc:200] bow tie (817): 0.05407
-I tensorflow/examples/label_image/main.cc:200] suit (794): 0.0113195
-I tensorflow/examples/label_image/main.cc:200] bulletproof vest (833): 0.0100269
-I tensorflow/examples/label_image/main.cc:200] bearskin (849): 0.00649746
+I tensorflow/examples/label_image/main.cc:207] military uniform (866): 0.647299
+I tensorflow/examples/label_image/main.cc:207] suit (794): 0.0477195
+I tensorflow/examples/label_image/main.cc:207] academic gown (896): 0.0232407
+I tensorflow/examples/label_image/main.cc:207] bow tie (817): 0.0157355
+I tensorflow/examples/label_image/main.cc:207] bolo tie (940): 0.0145023
 ```
 In this case, we're using the default image of Admiral Grace Hopper, and you can
 see the network correctly spots she's wearing a military uniform, with a high
-score of 0.9.
+score of 0.6.
 
 Next, try it out on your own images by supplying the --image= argument, e.g.
@@ -117,7 +117,7 @@
 "      print('Found and verified', filename)\n",
 "    else:\n",
 "      raise Exception(\n",
-"        'Failed to verify' + filename + '. Can you get to it with a browser?')\n",
+"        'Failed to verify ' + filename + '. Can you get to it with a browser?')\n",
 "  return filename\n",
 "\n",
 "train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n",
@@ -67,7 +67,7 @@ NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
 LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
 INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.
 
-# If a model is trained with multiple GPU's prefix all Op names with tower_name
+# If a model is trained with multiple GPUs, prefix all Op names with tower_name
 # to differentiate the operations. Note that this prefix is removed from the
 # names of the summaries when visualizing a model.
 TOWER_NAME = 'tower'
@@ -255,7 +255,7 @@ def inference(images):
 def loss(logits, labels):
   """Add L2Loss to all the trainable variables.
 
-  Add summary for for "Loss" and "Loss/avg".
+  Add summary for "Loss" and "Loss/avg".
   Args:
     logits: Logits from inference().
     labels: Labels from distorted_inputs or inputs(). 1-D tensor
@@ -172,7 +172,7 @@ def distorted_inputs(data_dir, batch_size):
   distorted_image = tf.image.random_flip_left_right(distorted_image)
 
   # Because these operations are not commutative, consider randomizing
-  # randomize the order their operation.
+  # the order their operation.
   distorted_image = tf.image.random_brightness(distorted_image,
                                                max_delta=63)
   distorted_image = tf.image.random_contrast(distorted_image,
@@ -181,6 +181,7 @@ __all__.extend([
     'bfloat16', 'bfloat16_ref',
     'bool', 'bool_ref',
     'complex64', 'complex64_ref',
+    'complex128', 'complex128_ref',
     'double', 'double_ref',
     'float32', 'float32_ref',
     'float64', 'float64_ref',
@@ -687,7 +687,8 @@ class SessionTest(test_util.TensorFlowTestCase):
                   dtypes.int8,
                   dtypes.int64,
                   dtypes.bool,
-                  dtypes.complex64]:
+                  dtypes.complex64,
+                  dtypes.complex128]:
       for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
         np_dtype = dtype.as_numpy_dtype
 
@@ -700,6 +701,8 @@ class SessionTest(test_util.TensorFlowTestCase):
           np_array = np_array > 0
         elif dtype == dtypes.complex64:
          np_array = np.sqrt(np_array.astype(np_dtype))
+        elif dtype == dtypes.complex128:
+          np_array = np.sqrt(np_array.astype(np_dtype))
         else:
           np_array = np_array.astype(np_dtype)
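The `np.sqrt` step above is how the test manufactures genuinely complex values: the square root of a negative real, taken after casting to a complex dtype, has a nonzero imaginary part. For example:

```python
import numpy as np

a = np.array([-4.0, 9.0])
print(np.sqrt(a.astype(np.complex128)))  # [0.+2.j  3.+0.j]
```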
@@ -1,4 +1,4 @@
-/* Copyright 2015 Google Inc. All Rights Reserved.
+/* Copyright 2016 Google Inc. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -121,7 +121,10 @@ Status PyArray_TYPE_to_TF_DataType(PyArrayObject* array,
       *out_tf_datatype = TF_BOOL;
       break;
     case NPY_COMPLEX64:
-      *out_tf_datatype = TF_COMPLEX;
+      *out_tf_datatype = TF_COMPLEX64;
+      break;
+    case NPY_COMPLEX128:
+      *out_tf_datatype = TF_COMPLEX128;
       break;
     case NPY_OBJECT:
       *out_tf_datatype = TF_STRING;
@@ -168,9 +171,12 @@ Status TF_DataType_to_PyArray_TYPE(TF_DataType tf_datatype,
     case TF_BOOL:
       *out_pyarray_type = NPY_BOOL;
       break;
-    case TF_COMPLEX:
+    case TF_COMPLEX64:
       *out_pyarray_type = NPY_COMPLEX64;
       break;
+    case TF_COMPLEX128:
+      *out_pyarray_type = NPY_COMPLEX128;
+      break;
     case TF_STRING:
       *out_pyarray_type = NPY_OBJECT;
       break;
@@ -32,6 +32,7 @@ class DType(object):
   * `tf.float64`: 64-bit double-precision floating-point.
   * `tf.bfloat16`: 16-bit truncated floating-point.
   * `tf.complex64`: 64-bit single-precision complex.
+  * `tf.complex128`: 128-bit double-precision complex.
 
   * `tf.int8`: 8-bit signed integer.
   * `tf.uint8`: 8-bit unsigned integer.
@@ -122,6 +123,8 @@ class DType(object):
     base = self.base_dtype
     if base == complex64:
       return float32
+    elif base == complex128:
+      return float64
     else:
       return self
 
@@ -149,7 +152,7 @@ class DType(object):
   @property
   def is_complex(self):
     """Returns whether this is a complex floating point type."""
-    return self.base_dtype == complex64
+    return self.base_dtype in (complex64, complex128)
 
   @property
   def is_quantized(self):
@@ -179,8 +182,8 @@ class DType(object):
       TypeError: if this is a non-numeric, unordered, or quantized type.
 
     """
-    if (self.is_quantized or self.base_dtype == bool or
-        self.base_dtype == string or self.base_dtype == complex64):
+    if (self.is_quantized or self.base_dtype in
+        (bool, string, complex64, complex128)):
       raise TypeError("Cannot find minimum value of %s." % self)
 
     # there is no simple way to get the min value of a dtype, we have to check
@@ -201,8 +204,8 @@ class DType(object):
       TypeError: if this is a non-numeric, unordered, or quantized type.
 
     """
-    if (self.is_quantized or self.base_dtype == bool or
-        self.base_dtype == string or self.base_dtype == complex64):
+    if (self.is_quantized or self.base_dtype in
+        (bool, string, complex64, complex128)):
       raise TypeError("Cannot find maximum value of %s." % self)
 
     # there is no simple way to get the min value of a dtype, we have to check
@@ -277,6 +280,7 @@ int16 = DType(types_pb2.DT_INT16)
 int8 = DType(types_pb2.DT_INT8)
 string = DType(types_pb2.DT_STRING)
 complex64 = DType(types_pb2.DT_COMPLEX64)
+complex128 = DType(types_pb2.DT_COMPLEX128)
 int64 = DType(types_pb2.DT_INT64)
 bool = DType(types_pb2.DT_BOOL)
 qint8 = DType(types_pb2.DT_QINT8)
@@ -295,6 +299,7 @@ int16_ref = DType(types_pb2.DT_INT16_REF)
 int8_ref = DType(types_pb2.DT_INT8_REF)
 string_ref = DType(types_pb2.DT_STRING_REF)
 complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
+complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
 int64_ref = DType(types_pb2.DT_INT64_REF)
 bool_ref = DType(types_pb2.DT_BOOL_REF)
 qint8_ref = DType(types_pb2.DT_QINT8_REF)
@@ -317,6 +322,7 @@ _INTERN_TABLE = {
     types_pb2.DT_INT8: int8,
     types_pb2.DT_STRING: string,
     types_pb2.DT_COMPLEX64: complex64,
+    types_pb2.DT_COMPLEX128: complex128,
     types_pb2.DT_INT64: int64,
     types_pb2.DT_BOOL: bool,
     types_pb2.DT_QINT8: qint8,
@@ -334,6 +340,7 @@ _INTERN_TABLE = {
     types_pb2.DT_INT8_REF: int8_ref,
     types_pb2.DT_STRING_REF: string_ref,
     types_pb2.DT_COMPLEX64_REF: complex64_ref,
+    types_pb2.DT_COMPLEX128_REF: complex128_ref,
     types_pb2.DT_INT64_REF: int64_ref,
     types_pb2.DT_BOOL_REF: bool_ref,
     types_pb2.DT_QINT8_REF: qint8_ref,
@@ -356,6 +363,7 @@ _TYPE_TO_STRING = {
     types_pb2.DT_INT8: "int8",
     types_pb2.DT_STRING: "string",
     types_pb2.DT_COMPLEX64: "complex64",
+    types_pb2.DT_COMPLEX128: "complex128",
     types_pb2.DT_INT64: "int64",
     types_pb2.DT_BOOL: "bool",
     types_pb2.DT_QINT8: "qint8",
@@ -373,6 +381,7 @@ _TYPE_TO_STRING = {
     types_pb2.DT_INT8_REF: "int8_ref",
    types_pb2.DT_STRING_REF: "string_ref",
    types_pb2.DT_COMPLEX64_REF: "complex64_ref",
+    types_pb2.DT_COMPLEX128_REF: "complex128_ref",
    types_pb2.DT_INT64_REF: "int64_ref",
    types_pb2.DT_BOOL_REF: "bool_ref",
    types_pb2.DT_QINT8_REF: "qint8_ref",
@@ -414,6 +423,7 @@ _NP_TO_TF = frozenset([
    (np.int16, int16),
    (np.int8, int8),
    (np.complex64, complex64),
+    (np.complex128, complex128),
    (np.object, string),
    (np.bool, bool),
    (_np_qint8, qint8),
@@ -435,6 +445,7 @@ _TF_TO_NP = {
    # strings.
    types_pb2.DT_STRING: np.object,
    types_pb2.DT_COMPLEX64: np.complex64,
+    types_pb2.DT_COMPLEX128: np.complex128,
    types_pb2.DT_INT64: np.int64,
    types_pb2.DT_BOOL: np.bool,
    types_pb2.DT_QINT8: _np_qint8,
@@ -454,6 +465,7 @@ _TF_TO_NP = {
    types_pb2.DT_INT8_REF: np.int8,
    types_pb2.DT_STRING_REF: np.object,
    types_pb2.DT_COMPLEX64_REF: np.complex64,
+    types_pb2.DT_COMPLEX128_REF: np.complex128,
    types_pb2.DT_INT64_REF: np.int64,
    types_pb2.DT_BOOL_REF: np.bool,
    types_pb2.DT_QINT8_REF: _np_qint8,
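Taken together, the dtypes.py changes make `tf.complex128` a first-class DType. A short usage sketch of the properties touched above (assuming a TensorFlow build that includes this change):

```python
import numpy as np
import tensorflow as tf

x = tf.constant(np.array([1 + 2j, 3 - 4j]), dtype=tf.complex128)

print(tf.complex128.is_complex)   # True
print(tf.complex128.real_dtype)   # float64 (complex64 maps to float32)
print(x.dtype.as_numpy_dtype)     # <class 'numpy.complex128'>
```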
@@ -71,6 +71,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertIs(tf.int16, tf.as_dtype(np.int16))
     self.assertIs(tf.int8, tf.as_dtype(np.int8))
     self.assertIs(tf.complex64, tf.as_dtype(np.complex64))
+    self.assertIs(tf.complex128, tf.as_dtype(np.complex128))
     self.assertIs(tf.string, tf.as_dtype(np.object))
     self.assertIs(tf.string, tf.as_dtype(np.array(["foo", "bar"]).dtype))
     self.assertIs(tf.bool, tf.as_dtype(np.bool))
@@ -82,6 +83,7 @@ class TypesTest(test_util.TensorFlowTestCase):
                   tf.int32, tf.int64]:
       self.assertIs(dtype.real_dtype, dtype)
     self.assertIs(tf.complex64.real_dtype, tf.float32)
+    self.assertIs(tf.complex128.real_dtype, tf.float64)
 
   def testStringConversion(self):
     self.assertIs(tf.float32, tf.as_dtype("float32"))
@@ -93,6 +95,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertIs(tf.int8, tf.as_dtype("int8"))
     self.assertIs(tf.string, tf.as_dtype("string"))
     self.assertIs(tf.complex64, tf.as_dtype("complex64"))
+    self.assertIs(tf.complex128, tf.as_dtype("complex128"))
     self.assertIs(tf.int64, tf.as_dtype("int64"))
     self.assertIs(tf.bool, tf.as_dtype("bool"))
     self.assertIs(tf.qint8, tf.as_dtype("qint8"))
@@ -107,6 +110,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertIs(tf.int8_ref, tf.as_dtype("int8_ref"))
     self.assertIs(tf.string_ref, tf.as_dtype("string_ref"))
     self.assertIs(tf.complex64_ref, tf.as_dtype("complex64_ref"))
+    self.assertIs(tf.complex128_ref, tf.as_dtype("complex128_ref"))
     self.assertIs(tf.int64_ref, tf.as_dtype("int64_ref"))
     self.assertIs(tf.bool_ref, tf.as_dtype("bool_ref"))
     self.assertIs(tf.qint8_ref, tf.as_dtype("qint8_ref"))
@@ -135,6 +139,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertEqual(tf.as_dtype("uint8").is_integer, True)
     self.assertEqual(tf.as_dtype("uint16").is_integer, True)
     self.assertEqual(tf.as_dtype("complex64").is_integer, False)
+    self.assertEqual(tf.as_dtype("complex128").is_integer, False)
     self.assertEqual(tf.as_dtype("float").is_integer, False)
     self.assertEqual(tf.as_dtype("double").is_integer, False)
     self.assertEqual(tf.as_dtype("string").is_integer, False)
@@ -148,6 +153,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertEqual(tf.as_dtype("uint8").is_floating, False)
     self.assertEqual(tf.as_dtype("uint16").is_floating, False)
     self.assertEqual(tf.as_dtype("complex64").is_floating, False)
+    self.assertEqual(tf.as_dtype("complex128").is_floating, False)
     self.assertEqual(tf.as_dtype("float32").is_floating, True)
     self.assertEqual(tf.as_dtype("float64").is_floating, True)
     self.assertEqual(tf.as_dtype("string").is_floating, False)
@@ -161,6 +167,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertEqual(tf.as_dtype("uint8").is_complex, False)
     self.assertEqual(tf.as_dtype("uint16").is_complex, False)
     self.assertEqual(tf.as_dtype("complex64").is_complex, True)
+    self.assertEqual(tf.as_dtype("complex128").is_complex, True)
     self.assertEqual(tf.as_dtype("float32").is_complex, False)
     self.assertEqual(tf.as_dtype("float64").is_complex, False)
     self.assertEqual(tf.as_dtype("string").is_complex, False)
@@ -178,6 +185,7 @@ class TypesTest(test_util.TensorFlowTestCase):
     self.assertEqual(tf.as_dtype("bool").is_unsigned, False)
     self.assertEqual(tf.as_dtype("string").is_unsigned, False)
     self.assertEqual(tf.as_dtype("complex64").is_unsigned, False)
+    self.assertEqual(tf.as_dtype("complex128").is_unsigned, False)
 
   def testMinMax(self):
     # make sure min/max evaluates for all data types that have min/max
@@ -192,7 +200,8 @@ class TypesTest(test_util.TensorFlowTestCase):
       if (dtype.is_quantized or
           dtype.base_dtype == tf.bool or
          dtype.base_dtype == tf.string or
-          dtype.base_dtype == tf.complex64):
+          dtype.base_dtype == tf.complex64 or
+          dtype.base_dtype == tf.complex128):
        continue
 
      print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
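These tests pin down the conversion table: the same interned dtype object comes back whether you start from a numpy type or a string name. For instance:

```python
import numpy as np
import tensorflow as tf

assert tf.as_dtype(np.complex128) is tf.complex128
assert tf.as_dtype("complex128") is tf.complex128
assert tf.as_dtype("complex128_ref") is tf.complex128_ref
```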
@@ -1289,7 +1289,7 @@ class ColocationGroupTest(test_util.TensorFlowTestCase):
     with ops.colocate_with(a.op):
       with ops.colocate_with(b.op, ignore_existing=True):
         c = constant_op.constant(4.0)
-    self.assertEqual(set(["loc:@b"]), set(c.op.colocation_groups()))
+    self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
 
   def testColocateVariables(self):
     a = variables.Variable([2.0], name="a")
@@ -76,11 +76,16 @@ else:
   def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])
 
-  def SlowAppendComplexArrayToTensorProto(tensor_proto, proto_values):
+  def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.scomplex_val.extend([np.asscalar(v)
                                       for x in proto_values
                                       for v in [x.real, x.imag]])
 
+  def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
+    tensor_proto.dcomplex_val.extend([np.asscalar(v)
+                                      for x in proto_values
+                                      for v in [x.real, x.imag]])
+
   def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
     tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
 
@@ -96,8 +101,8 @@ else:
       np.uint16: SlowAppendIntArrayToTensorProto,
       np.int16: SlowAppendIntArrayToTensorProto,
       np.int8: SlowAppendIntArrayToTensorProto,
-      np.complex64: SlowAppendComplexArrayToTensorProto,
-      np.complex128: SlowAppendComplexArrayToTensorProto,
+      np.complex64: SlowAppendComplex64ArrayToTensorProto,
+      np.complex128: SlowAppendComplex128ArrayToTensorProto,
       np.object: SlowAppendObjectArrayToTensorProto,
       np.bool: SlowAppendBoolArrayToTensorProto,
       dtypes.qint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
@@ -240,6 +245,7 @@ _TF_TO_IS_OK = {
     dtypes.int8: _FilterInt,
     dtypes.string: _FilterStr,
     dtypes.complex64: _FilterComplex,
+    dtypes.complex128: _FilterComplex,
     dtypes.int64: _FilterInt,
     dtypes.bool: _FilterBool,
     dtypes.qint32: _FilterInt,
@@ -453,6 +459,15 @@ def MakeNdarray(tensor):
     else:
       return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                       dtype=dtype).reshape(shape)
+  elif tensor_dtype == dtypes.complex128:
+    it = iter(tensor.dcomplex_val)
+    if len(tensor.dcomplex_val) == 2:
+      return np.repeat(np.array(complex(tensor.dcomplex_val[0],
+                                        tensor.dcomplex_val[1]), dtype=dtype),
+                       num_elements).reshape(shape)
+    else:
+      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
+                      dtype=dtype).reshape(shape)
   elif tensor_dtype == dtypes.bool:
     if len(tensor.bool_val) == 1:
       return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
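With the helpers above in place, a complex128 ndarray round-trips through TensorProto, with the values landing in `dcomplex_val` as interleaved doubles. A minimal sketch (using the internal `tensor_util` module path as in the diff):

```python
import numpy as np
from tensorflow.python.framework import tensor_util

arr = np.array([[1 + 2j, 3 + 4j]], dtype=np.complex128)
proto = tensor_util.make_tensor_proto(arr)
print(proto.dcomplex_val)        # [1.0, 2.0, 3.0, 4.0]

restored = tensor_util.MakeNdarray(proto)
assert restored.dtype == np.complex128
assert np.array_equal(arr, restored)
```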
@@ -274,7 +274,7 @@ class TensorUtilTest(tf.test.TestCase):
     self.assertEquals(np.object, a.dtype)
     self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
 
-  def testComplex(self):
+  def testComplex64(self):
     t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex64)
     self.assertProtoEquals("""
       dtype: DT_COMPLEX64
@@ -286,16 +286,30 @@ class TensorUtilTest(tf.test.TestCase):
     self.assertEquals(np.complex64, a.dtype)
     self.assertAllEqual(np.array(1 + 2j), a)
 
-  def testComplexWithImplicitRepeat(self):
-    t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
-                                      dtype=tf.complex64)
+  def testComplex128(self):
+    t = tensor_util.make_tensor_proto((1+2j), dtype=tf.complex128)
+    self.assertProtoEquals("""
+      dtype: DT_COMPLEX128
+      tensor_shape {}
+      dcomplex_val: 1
+      dcomplex_val: 2
+      """, t)
     a = tensor_util.MakeNdarray(t)
-    self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
-                                  [(1+1j), (1+1j), (1+1j), (1+1j)],
-                                  [(1+1j), (1+1j), (1+1j), (1+1j)]],
-                                 dtype=np.complex64), a)
+    self.assertEquals(np.complex128, a.dtype)
+    self.assertAllEqual(np.array(1 + 2j), a)
 
-  def testComplexN(self):
+  def testComplexWithImplicitRepeat(self):
+    for dtype, np_dtype in [(tf.complex64, np.complex64),
+                            (tf.complex128, np.complex128)]:
+      t = tensor_util.make_tensor_proto((1+1j), shape=[3, 4],
+                                        dtype=dtype)
+      a = tensor_util.MakeNdarray(t)
+      self.assertAllClose(np.array([[(1+1j), (1+1j), (1+1j), (1+1j)],
+                                    [(1+1j), (1+1j), (1+1j), (1+1j)],
+                                    [(1+1j), (1+1j), (1+1j), (1+1j)]],
+                                   dtype=np_dtype), a)
+
+  def testComplex64N(self):
     t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
                                       dtype=tf.complex64)
     self.assertProtoEquals("""
@@ -312,7 +326,24 @@ class TensorUtilTest(tf.test.TestCase):
     self.assertEquals(np.complex64, a.dtype)
     self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
 
-  def testComplexNpArray(self):
+  def testComplex128N(self):
+    t = tensor_util.make_tensor_proto([(1+2j), (3+4j), (5+6j)], shape=[1, 3],
+                                      dtype=tf.complex128)
+    self.assertProtoEquals("""
+      dtype: DT_COMPLEX128
+      tensor_shape { dim { size: 1 } dim { size: 3 } }
+      dcomplex_val: 1
+      dcomplex_val: 2
+      dcomplex_val: 3
+      dcomplex_val: 4
+      dcomplex_val: 5
+      dcomplex_val: 6
+      """, t)
+    a = tensor_util.MakeNdarray(t)
+    self.assertEquals(np.complex128, a.dtype)
+    self.assertAllEqual(np.array([[(1+2j), (3+4j), (5+6j)]]), a)
+
+  def testComplex64NpArray(self):
     t = tensor_util.make_tensor_proto(
         np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex64)
     # scomplex_val are real_0, imag_0, real_1, imag_1, ...
@@ -332,6 +363,26 @@ class TensorUtilTest(tf.test.TestCase):
     self.assertEquals(np.complex64, a.dtype)
     self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
 
+  def testComplex128NpArray(self):
+    t = tensor_util.make_tensor_proto(
+        np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), dtype=tf.complex128)
+    # scomplex_val are real_0, imag_0, real_1, imag_1, ...
+    self.assertProtoEquals("""
+      dtype: DT_COMPLEX128
+      tensor_shape { dim { size: 2 } dim { size: 2 } }
+      dcomplex_val: 1
+      dcomplex_val: 2
+      dcomplex_val: 3
+      dcomplex_val: 4
+      dcomplex_val: 5
+      dcomplex_val: 6
+      dcomplex_val: 7
+      dcomplex_val: 8
+      """, t)
+    a = tensor_util.MakeNdarray(t)
+    self.assertEquals(np.complex128, a.dtype)
+    self.assertAllEqual(np.array([[(1+2j), (3+4j)], [(5+6j), (7+8j)]]), a)
+
   def testUnsupportedDType(self):
     with self.assertRaises(TypeError):
       tensor_util.make_tensor_proto(np.array([1]), 0)
@@ -99,6 +99,9 @@ Status TfDTypeToNpDType(const DataType& tf, int* np) {
     case DT_COMPLEX64:
       *np = NPY_COMPLEX64;
       break;
+    case DT_COMPLEX128:
+      *np = NPY_COMPLEX128;
+      break;
     case DT_STRING:
       *np = NPY_OBJECT;
       break;
@@ -210,6 +213,9 @@ Status NumericNpDTypeToTfDType(const int np, DataType* tf) {
     case NPY_COMPLEX64:
      *tf = DT_COMPLEX64;
      break;
+    case NPY_COMPLEX128:
+      *tf = DT_COMPLEX128;
+      break;
    default:
      return errors::Unimplemented("Unsupported numpy type ", np);
  }
@@ -326,7 +326,7 @@ class SessionManager(object):
     try:
       sess.run(self._ready_op)
       return None
-    except errors.FailedPreconditionError, e:
+    except errors.FailedPreconditionError as e:
       if "uninitialized" not in str(e):
         logging.warning("Model not ready raised: %s", str(e))
         raise e
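`except E, e` is Python 2-only syntax; the `except E as e` form parses on both Python 2 and 3, which is the point of this one-line fix:

```python
# Works on Python 2 and 3; the old comma form is a SyntaxError on Python 3.
try:
    raise ValueError("boom")
except ValueError as e:
    print(str(e))  # boom
```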
@@ -7,6 +7,7 @@ COPY install/*.sh /install/
 RUN /install/install_bootstrap_deb_packages.sh
 RUN add-apt-repository -y ppa:openjdk-r/ppa
 RUN /install/install_deb_packages.sh
+RUN /install/install_pip_packages.sh
 RUN /install/install_bazel.sh
 
 # Set up bazelrc.
@@ -7,6 +7,7 @@ COPY install/*.sh /install/
 RUN /install/install_bootstrap_deb_packages.sh
 RUN add-apt-repository -y ppa:openjdk-r/ppa
 RUN /install/install_deb_packages.sh
+RUN /install/install_pip_packages.sh
 RUN /install/install_bazel.sh
 
 # Set up bazelrc.
@@ -22,11 +22,20 @@
 # pip.sh CONTAINER_TYPE [--test_tutorials]
 #
 # When executing the Python unit tests, the script obeys the shell
-# variables: TF_BUILD_BAZEL_CLEAN, NO_TEST_ON_INSTALL
+# variables: TF_BUILD_BAZEL_CLEAN, TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES,
+# TF_BUILD_NO_CACHING_VIRTUALENV, NO_TEST_ON_INSTALL
 #
 # TF_BUILD_BAZEL_CLEAN, if set to any non-empty and non-0 value, directs the
 # script to perform bazel clean prior to main build and test steps.
 #
+# TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES overrides the default extra pip packages
+# to be installed in virtualenv before test_installation.sh is called. Multiple
+# pakcage names are separated with spaces.
+#
+# TF_BUILD_NO_CACHING_VIRTUALENV: If set to any non-empty and non-0 value,
+# will cause the script to force remove any existing (cached) virtualenv
+# directory.
+#
 # If NO_TEST_ON_INSTALL has any non-empty and non-0 value, the test-on-install
 # part will be skipped.
 #
@@ -35,6 +44,8 @@
 # installation and the Python unit tests-on-install step.
 #
 
+INSTALL_EXTRA_PIP_PACKAGES=${TF_BUILD_INSTALL_EXTRA_PIP_PACKAGES}
+
 # Helper functions
 # Get the absolute path from a path
 abs_path() {
@@ -111,7 +122,7 @@ PIP_WHL_DIR="${PIP_TEST_ROOT}/whl"
 PIP_WHL_DIR=$(abs_path ${PIP_WHL_DIR})  # Get absolute path
 rm -rf ${PIP_WHL_DIR} && mkdir -p ${PIP_WHL_DIR}
 bazel-bin/tensorflow/tools/pip_package/build_pip_package ${PIP_WHL_DIR} || \
-    die "build_pip_package FAILED"
+  die "build_pip_package FAILED"
 
 # Perform installation
 WHL_PATH=$(ls ${PIP_WHL_DIR}/tensorflow*.whl)
@@ -125,27 +136,46 @@ echo "whl file path = ${WHL_PATH}"
 # Install, in user's local home folder
 echo "Installing pip whl file: ${WHL_PATH}"
 
-# Create temporary directory for install test
+# Create virtualenv directory for install test
 VENV_DIR="${PIP_TEST_ROOT}/venv"
-rm -rf "${VENV_DIR}" && mkdir -p "${VENV_DIR}"
-echo "Create directory for virtualenv: ${VENV_DIR}"
+if [[ -d "${VENV_DIR}" ]] &&
+   [[ ! -z "${TF_BUILD_NO_CACHING_VIRTUALENV}" ]] &&
+   [[ "${TF_BUILD_NO_CACHING_VIRTUALENV}" != "0" ]]; then
+  echo "TF_BUILD_NO_CACHING_VIRTUALENV=${TF_BUILD_NO_CACHING_VIRTUALENV}:"
+  echo "Removing existing virtualenv directory: ${VENV_DIR}"
+
+  rm -rf "${VENV_DIR}" || \
+    die "Failed to remove existing virtualenv directory: ${VENV_DIR}"
+fi
+
+mkdir -p ${VENV_DIR} || \
+  die "FAILED to create virtualenv directory: ${VENV_DIR}"
 
 # Verify that virtualenv exists
 if [[ -z $(which virtualenv) ]]; then
   die "FAILED: virtualenv not available on path"
 fi
 
-virtualenv -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" ||
-    die "FAILED: Unable to create virtualenv"
+virtualenv --system-site-packages -p "${PYTHON_BIN_PATH}" "${VENV_DIR}" || \
+  die "FAILED: Unable to create virtualenv"
 
-source "${VENV_DIR}/bin/activate" ||
-    die "FAILED: Unable to activate virtualenv"
+source "${VENV_DIR}/bin/activate" || \
+  die "FAILED: Unable to activate virtualenv"
 
 # Install the pip file in virtual env
-pip install -v ${WHL_PATH} \
+pip install -v --force-reinstall ${WHL_PATH} \
   && echo "Successfully installed pip package ${WHL_PATH}" \
   || die "pip install (without --upgrade) FAILED"
 
+# Install extra pip packages required by the test-on-install
+for PACKAGE in ${INSTALL_EXTRA_PIP_PACKAGES}; do
+  echo "Installing extra pip package required by test-on-install: ${PACKAGE}"
+
+  pip install ${PACKAGE} || \
+    die "pip install ${PACKAGE} FAILED"
+done
+
 # If NO_TEST_ON_INSTALL is set to any non-empty value, skip all Python
 # tests-on-install and exit right away
 if [[ ! -z "${NO_TEST_ON_INSTALL}" ]] &&
@@ -158,14 +188,14 @@ fi
 # Call test_installation.sh to perform test-on-install
 DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 
-"${DIR}/test_installation.sh" --virtualenv ||
-    die "PIP tests-on-install FAILED"
+"${DIR}/test_installation.sh" --virtualenv || \
+  die "PIP tests-on-install FAILED"
 
 # Optional: Run the tutorial tests
 if [[ "${DO_TEST_TUTORIALS}" == "1" ]]; then
-  "${DIR}/test_tutorials.sh" --virtualenv ||
-      die "PIP tutorial tests-on-install FAILED"
+  "${DIR}/test_tutorials.sh" --virtualenv || \
+    die "PIP tutorial tests-on-install FAILED"
 fi
 
-deactivate ||
-    die "FAILED: Unable to deactivate virtualenv"
+deactivate || \
+  die "FAILED: Unable to deactivate virtualenv"
@@ -166,7 +166,8 @@ cp -r tensorflow/core/lib/png ${PY_TEST_DIR}/tensorflow/core/lib
 
 # Run tests
 DIR0=$(pwd)
-ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} -name "*_test.py" | sort)
+ALL_PY_TESTS=$(find tensorflow/{contrib,examples,models,python,tensorboard} \
+    -type f \( -name "*_test.py" -o -name "test_*.py" \) | sort)
 # TODO(cais): Add tests in tensorflow/contrib
 
 PY_TEST_COUNT=$(echo ${ALL_PY_TESTS} | wc -w)
@@ -306,7 +306,7 @@ if [[ "${DO_DOCKER}" == "1" ]]; then
 fi
 
 # Write to the tmp script
-echo "#!/bin/bash" > ${TMP_SCRIPT}
+echo "#!/usr/bin/env bash" > ${TMP_SCRIPT}
 if [[ ! -z "${TF_BUILD_BAZEL_CLEAN}" ]] &&
    [[ "${TF_BUILD_BAZEL_CLEAN}" != "0" ]]; then
   echo ${BAZEL_CLEAN_CMD} >> ${TMP_SCRIPT}
@@ -29,10 +29,12 @@ apt-get install -y \
     python-dev \
     python-numpy \
     python-pip \
+    python-scipy \
     python-virtualenv \
    python3-dev \
    python3-numpy \
    python3-pip \
+    python3-scipy \
    sudo \
    swig \
    unzip \
tensorflow/tools/ci_build/install/install_pip_packages.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+set -e
+
+pip install sklearn
+pip3 install scikit-learn
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
-#!/bin/bash -eux
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,6 +14,8 @@
 # limitations under the License.
 # ==============================================================================
 
+set -eux
+
 TFDIR=$TEST_SRCDIR/tensorflow
 DOXYGEN=doxygen
 DOXYGEN_CONFIG="tf-doxy_for_md-config"
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -53,23 +53,21 @@ py_binary(
 
 # Unit test that calls run_and_gather_logs on a benchmark, and
 # prints the result.
-cuda_py_test(
-    name = "run_and_gather_logs_test",
-    srcs = ["run_and_gather_logs.py"],
-    additional_deps = [
-        ":run_and_gather_logs",
-    ],
-    args = [
-        "--test_name=" + "//tensorflow/core/kernels:cast_op_test",
-        "--test_args=" + "'--benchmarks=BM_cpu_float_bfloat16'",
-        "--compilation_mode='$(COMPILATION_MODE)'",
-        "--cc_flags='$(CC_FLAGS)'",
-    ],
-    data = [
-        "//tensorflow/core/kernels:cast_op_test",
-    ],
-    main = "run_and_gather_logs.py",
-)
+#cuda_py_test(
+#    name = "run_and_gather_logs_test",
+#    srcs = ["run_and_gather_logs.py"],
+#    additional_deps = [
+#        ":run_and_gather_logs",
+#    ],
+#    args = [
+#        "--test_name=" + "//tensorflow/core/kernels:cast_op_test",
+#        "--test_args=" + "'--benchmarks=BM_cpu_float'",
+#    ],
+#    data = [
+#        "//tensorflow/core/kernels:cast_op_test",
+#    ],
+#    main = "run_and_gather_logs.py",
+#)
 
 filegroup(
     name = "all_files",
third_party/gpus/cuda/cuda_config.sh (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 Google Inc. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");