Automated rollback of commit 2d42144152

PiperOrigin-RevId: 210116745
A. Unique TensorFlower 2018-08-24 10:19:22 -07:00 committed by TensorFlower Gardener
parent ccb120b437
commit 37b2b0eb61
9 changed files with 17 additions and 19 deletions

View File

@@ -22,7 +22,6 @@ import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
@@ -54,7 +53,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
def testBasic(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
if dtype == np.float16:
continue
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
@@ -96,7 +95,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
def testTensorLearningRate(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
if dtype == np.float16:
continue
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
@@ -138,7 +137,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
def testSharing(self):
for dtype in self.float_types:
# TODO: test fails for float16 due to excessive precision requirements.
if dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
if dtype == np.float16:
continue
with self.test_session(), self.test_scope():
variable_scope.get_variable_scope().set_use_resource(True)
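The only change in these three tests is which low-precision dtypes are skipped: float16 alone versus float16 together with bfloat16. For context on the TODO about excessive precision requirements, both 16-bit formats have a much coarser machine epsilon than float32; the short illustration below is not part of the diff.

import numpy as np

# Machine epsilon by format: float32 stores 23 significand bits,
# float16 stores 10, and bfloat16 stores only 7.
print(np.finfo(np.float32).eps)   # ~1.19e-07
print(np.finfo(np.float16).eps)   # ~9.77e-04
print(2.0 ** -7)                  # 0.0078125, the corresponding bfloat16 spacing at 1.0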

View File

@@ -29,6 +29,7 @@ from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(xla_test.XLATestCase):
def initVariableAndGradient(self, dtype):
@@ -195,11 +196,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]),
var0.eval(),
rtol=1e-4,
bfloat16_rtol=1e-1,
bfloat16_atol=1e-1)
np.array([-7.66718769, -10.91273689]), var0.eval(), rtol=1e-4)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), var1.eval(), rtol=1e-4)
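The removed arguments are the per-dtype overrides that assertAllCloseAccordingToType accepts; without bfloat16_rtol and bfloat16_atol, bfloat16 comparisons fall back to the helper's defaults. The sketch below mimics that selection; the keyword names match the real helper, but the logic and default values are assumptions made for illustration.

def tolerances_for(dtype_name, rtol=1e-6, atol=1e-6,
                   half_rtol=1e-3, half_atol=1e-3,
                   bfloat16_rtol=1e-2, bfloat16_atol=1e-2):
    # Simplified stand-in, not TensorFlow's implementation: low-precision
    # dtypes widen the tolerances used for the comparison.
    if dtype_name == "float16":
        return max(rtol, half_rtol), max(atol, half_atol)
    if dtype_name == "bfloat16":
        return max(rtol, bfloat16_rtol), max(atol, bfloat16_atol)
    return rtol, atol

# With the explicit overrides the old test passed, bfloat16 is compared at
# rtol = atol = 1e-1; without them the (assumed) defaults apply.
print(tolerances_for("bfloat16", rtol=1e-4,
                     bfloat16_rtol=1e-1, bfloat16_atol=1e-1))  # (0.1, 0.1)
print(tolerances_for("bfloat16", rtol=1e-4))                   # (0.01, 0.01)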

View File

@@ -219,7 +219,7 @@ class ReduceOpPrecisionTest(xla_test.XLATestCase):
bf16_max = np.float32(dtypes.bfloat16.max)
f32_max = dtypes.float32.max
value = min(bf16_max, f32_max - bf16_max) / 2
value = min(bf16_max, f32_max - bf16_max)
self._testReduceSum(
dtypes.bfloat16.as_numpy_dtype(value), dtypes.bfloat16.as_numpy_dtype,
itertools.permutations([bf16_max, value, bf16_max * (-1.0)], 3))
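The two versions of value differ only in the final division by two: min(bf16_max, f32_max - bf16_max) is the full float32 headroom above the largest finite bfloat16, and the rollback keeps the un-halved form. The magnitudes can be checked directly; the snippet below is an illustration and is not taken from the test.

import numpy as np

f32_max = float(np.finfo(np.float32).max)    # ~3.4028235e38
bf16_max = (2.0 - 2.0 ** -7) * 2.0 ** 127    # ~3.3895314e38, largest finite bfloat16

headroom = f32_max - bf16_max                # ~1.329e36
value = min(bf16_max, headroom)              # the un-halved value kept by the rollback
print(bf16_max, headroom, value)

# bf16_max + value lands exactly on the float32 maximum, so a float32
# accumulator can sum any permutation of [bf16_max, value, -bf16_max]
# without overflowing to infinity.
print(np.float32(bf16_max) + np.float32(value) == np.float32(f32_max))  # True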

View File

@@ -122,10 +122,10 @@ TEST_F(LiteralUtilTest, LiteralScalarToString) {
auto bf16_lit = LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(0.5f));
ASSERT_EQ("0.5", bf16_lit->ToString());
// 3.14 will be rounded to 3.14062 in bfloat16 format.
// 3.14 will be truncated to 3.125 in bfloat16 format.
auto bf16_lit_truncated =
LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(3.14f));
ASSERT_EQ("3.14062", bf16_lit_truncated->ToString());
ASSERT_EQ("3.125", bf16_lit_truncated->ToString());
auto bf16_lit_truncated2 =
LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(9.001f));
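The updated expectation follows from bfloat16 keeping one implicit and seven stored significand bits: 3.14 is roughly 1.1001000111101b x 2^1, so truncating the stored bits gives 1.1001000b x 2 = 3.125, while rounding to nearest gives 1.1001001b x 2 = 3.140625, which ToString prints as "3.14062". A quick arithmetic check (illustration only, not part of the change):

# Seven stored significand bits: 3.14 ~= 1.1001000 111101...b * 2**1.
truncated = (1 + 0.5 + 0.0625) * 2            # keep 1.1001000b, drop the rest
rounded = (1 + 0.5 + 0.0625 + 2 ** -7) * 2    # dropped bits 111101b exceed half an ulp, so round up
print(truncated, rounded)                     # 3.125 3.140625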

View File

@@ -935,7 +935,7 @@ TEST_P(HloEvaluatorTest, Conv2DGeneralDimensionsReversed) {
// clang-format off
// Result dimensions: [feature=1, height=1, batch=1, width=2]
Array4D<float> expected_array({{{{2514, 2685}}}});
Array4D<float> expected_array_bf16({{{{2512, 2688}}}});
Array4D<float> expected_array_bf16({{{{2512, 2672}}}});
// clang-format on
auto expected = LiteralUtil::CreateR4FromArray4D<float>(
use_bfloat16_ ? expected_array_bf16 : expected_array);
@@ -1012,7 +1012,7 @@ TEST_P(HloEvaluatorTest, Conv2DGeneralDimensions) {
// clang-format off
// Result dimensions: [feature=1, height=1, batch=1, width=2]
Array4D<float> expected_array({{{{2514, 2685}}}});
Array4D<float> expected_array_bf16({{{{2512, 2688}}}});
Array4D<float> expected_array_bf16({{{{2512, 2672}}}});
// clang-format on
auto expected = LiteralUtil::CreateR4FromArray4D<float>(
use_bfloat16_ ? expected_array_bf16 : expected_array);
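The bfloat16 expectation changes here for the same reason as in the literal test above: the float32 result 2685 is 1.01001111101b x 2^11, which truncates to 2672 but rounds to 2688, while 2514 maps to 2512 under both schemes. Checked by hand (illustration only):

# 2685 = 1.0100111 1101b * 2**11; only seven stored significand bits survive.
truncated = (1 + 0.25 + 0.03125 + 0.015625 + 2 ** -7) * 2 ** 11   # 1.0100111b * 2**11
rounded = (1 + 0.25 + 0.0625) * 2 ** 11                           # dropped bits 1101b exceed half an ulp
print(truncated, rounded)                                         # 2672.0 2688.0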

View File

@@ -65,7 +65,7 @@ XLA_TEST_F(Bfloat16Test, LogOperation) {
Log(x);
ComputeAndCompareR0<bfloat16>(&builder, static_cast<bfloat16>(1.387f), {},
ErrorSpec(0.01, 0.01));
error_spec_);
}
XLA_TEST_F(Bfloat16Test, NegateScalarF16) {
@@ -110,7 +110,7 @@ XLA_TEST_F(Bfloat16Test, BatchNormTraining) {
{static_cast<bfloat16>(5), static_cast<bfloat16>(5)})
.get()});
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.01, 0.02));
ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.01));
}
XLA_TEST_F(Bfloat16Test, BatchNormGrad) {
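Both hunks in this file swap an explicit ErrorSpec for either the fixture's shared error_spec_ or the single-argument form; ErrorSpec pairs an absolute tolerance with a relative one, and a result is accepted when either bound holds. The following is a rough Python model of that acceptance rule, not XLA's actual comparator.

def within_error_spec(actual, expected, abs_err, rel_err=0.0):
    # Rough model: pass if the difference satisfies the absolute bound
    # or the relative bound.
    diff = abs(actual - expected)
    return diff <= abs_err or (expected != 0 and diff <= rel_err * abs(expected))

# 1.3828125 is 1.387 with the significand truncated to bfloat16's 7 stored bits.
print(within_error_spec(1.3828125, 1.387, abs_err=0.01))          # True: inside the absolute bound
print(within_error_spec(5.06, 5.0, abs_err=0.01, rel_err=0.02))   # True: inside the 2% relative bound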

View File

@@ -57,7 +57,7 @@ class ReduceWindowTestBase : public ClientLibraryTestBase {
public:
ErrorSpec DefaultErrorSpec() const {
if (use_bfloat16()) {
return ErrorSpec(2e-1, 6e-2);
return ErrorSpec(1e-1, 5e-2);
} else {
return ErrorSpec(1e-3, 1e-3);
}
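Either version of the bfloat16 branch is orders of magnitude looser than the 1e-3 used for other types, because each value carries only seven stored significand bits and reductions can cancel, turning a small representation error into a large relative one. A back-of-the-envelope illustration with made-up numbers:

# One conversion to bfloat16 can lose up to roughly one part in 2**7.
print(2.0 ** -7)             # 0.0078125

# Cancellation amplifies that loss: 1.004 sits between the neighbouring
# bfloat16 values 1.0 and 1.0078125, and truncation maps it to 1.0.
exact_diff = 1.004 - 1.0     # ~0.004
bf16_diff = 1.0 - 1.0        # 0.0 once 1.004 has been truncated
print(exact_diff, bf16_diff) # the difference now has 100% relative error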

View File

@@ -61,7 +61,9 @@ struct bfloat16 {
}
B16_DEVICE_FUNC explicit bfloat16(const float v) {
value = round_to_bfloat16(v).value;
// TODO(asabne) : change the below line to
// value = round_to_bfloat16(v).value;
value = truncate_to_bfloat16(v).value;
}
B16_DEVICE_FUNC explicit bfloat16(const double val)
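After the rollback the float constructor calls truncate_to_bfloat16 again, with the TODO pointing back to round_to_bfloat16. In bit terms, truncation keeps only the high 16 bits of the float32 encoding, while round-to-nearest-even adds 0x7FFF plus the low bit of the surviving half before dropping the low 16 bits. The Python stand-ins below sketch that difference; corner cases such as NaN, which the real C++ helpers handle, are ignored.

import struct

def truncate_to_bfloat16(x):
    # Stand-in: keep only the high 16 bits of the float32 encoding.
    (bits,) = struct.unpack("<I", struct.pack("<f", x))
    return struct.unpack("<f", struct.pack("<I", bits & 0xFFFF0000))[0]

def round_to_bfloat16(x):
    # Stand-in: round to the nearest bfloat16, ties to even.
    (bits,) = struct.unpack("<I", struct.pack("<f", x))
    bits += 0x7FFF + ((bits >> 16) & 1)   # just under half an ulp, plus the tie-breaking bit
    return struct.unpack("<f", struct.pack("<I", bits & 0xFFFF0000))[0]

# The two conversions account for the expectation changes elsewhere in this rollback:
print(truncate_to_bfloat16(3.14), round_to_bfloat16(3.14))       # 3.125 3.140625
print(truncate_to_bfloat16(2685.0), round_to_bfloat16(2685.0))   # 2672.0 2688.0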

View File

@@ -157,7 +157,7 @@ class MatMulGradientTest(test.TestCase):
m, [3, 4],
x_init_value=b.eval(),
delta=delta))
self.assertLessEqual(err, delta / 2.)
self.assertLess(err, delta / 2.)
def testGradientInput(self):
for tr_a in [True, False]:
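The assertion tightens from assertLessEqual to a strict assertLess on err, the maximum discrepancy reported by the finite-difference gradient check whose call is only partially visible in this hunk, against the bound delta / 2. Below is a self-contained NumPy sketch of that kind of check; the function, shapes, and delta are invented for illustration, and this is not the TensorFlow gradient_checker.

import numpy as np

# Central-difference check for f(A) = sum(A @ B); the analytic gradient is
# d/dA sum(A @ B) = ones(m, p) @ B.T.
rng = np.random.RandomState(0)
A, B = rng.randn(3, 2), rng.randn(2, 4)
delta = 1e-3

analytic = np.ones((3, 4)) @ B.T
numeric = np.zeros_like(A)
for i in range(A.shape[0]):
    for j in range(A.shape[1]):
        Ap, Am = A.copy(), A.copy()
        Ap[i, j] += delta
        Am[i, j] -= delta
        numeric[i, j] = (np.sum(Ap @ B) - np.sum(Am @ B)) / (2 * delta)

err = np.max(np.abs(numeric - analytic))
assert err < delta / 2.0   # the strict bound the updated test enforces
print(err)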