Remove using-directives. Tests pertaining to ops have been moved into namespace tensorflow::ops; all other tests now use explicit using-declarations.

Some tests now use unnamed namespaces more aggressively, to keep as many names internal as possible.

PiperOrigin-RevId: 180564422
A. Unique TensorFlower 2018-01-02 10:45:56 -08:00 committed by TensorFlower Gardener
parent 127c98b9e3
commit 040b4cbce7
17 changed files with 122 additions and 63 deletions
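
For reference, the shape of the change in a non-op test is roughly the following sketch (a hypothetical ExampleTest, not one of the files in this change; the headers and ops shown are only illustrative). Where a file previously opened with a single using-directive, it now opens an unnamed namespace and pulls in only the op wrappers it actually uses:

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {
namespace {

// Previously: using namespace ops;  // NOLINT(build/namespaces)
// Now: explicit using-declarations for just the ops this test touches.
using ops::Add;
using ops::Const;

TEST(ExampleTest, Basic) {
  Scope root = Scope::NewRootScope();
  auto a = Const(root, {1, 2});
  auto b = Const(root, {3, 4});
  auto sum = Add(root, a, b);
  ClientSession session(root);
  std::vector<Tensor> outputs;
  TF_ASSERT_OK(session.Run({sum}, &outputs));
  test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({4, 6}, {2}));
}

}  // namespace
}  // namespace tensorflow

Tests that exercise the op wrappers themselves (the cc_ops, audio, and quantized op tests below) instead wrap their contents in namespace ops { namespace { ... } }, so unqualified op names still resolve; free test functions invoked from the RUN_TEST macros are then called as tensorflow::ops::t() rather than tensorflow::t().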


@ -23,7 +23,13 @@ limitations under the License.
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace {
using ops::Add;
using ops::Const;
using ops::Mul;
using ops::Placeholder;
using ops::Sub;
TEST(ClientSessionTest, Basic) {
Scope root = Scope::NewRootScope();
@ -89,4 +95,5 @@ TEST(ClientSessionTest, MultiThreaded) {
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({-1, 2}, {2}));
}
} // end namespace tensorflow
} // namespace
} // namespace tensorflow


@ -22,8 +22,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace ops {
namespace {
Output Linear(const Scope& scope, Input x, Input w, Input b) {
@ -39,8 +38,6 @@ void GetColocationConstraints(const Output& tensor,
constraints));
}
} // namespace
TEST(CCOpTest, Basic) {
Scope root = Scope::NewRootScope();
auto c = Const(root, {{1, 1}});
@ -249,4 +246,6 @@ TEST(CCOpTest, InvalidFinalize) {
string::npos);
}
} // namespace
} // namespace ops
} // namespace tensorflow


@ -24,10 +24,18 @@ limitations under the License.
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace {
using ops::Complex;
using ops::Const;
using ops::MatMul;
using ops::Placeholder;
using ops::Real;
using ops::Split;
using ops::Square;
using ops::Stack;
using ops::Unstack;
TEST(GradientCheckerTest, BasicFloat) {
Scope scope = Scope::NewRootScope();
TensorShape shape({2, 4, 3});


@ -26,10 +26,20 @@ limitations under the License.
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace {
using ops::Assign;
using ops::Const;
using ops::Identity;
using ops::MatMul;
using ops::OnesLike;
using ops::Placeholder;
using ops::Square;
using ops::Stack;
using ops::StopGradient;
using ops::Unstack;
using ops::Variable;
// TODO(andydavis) Add more unit tests once more gradient functions are ported.
class GradientsTest : public ::testing::Test {
protected:


@ -23,11 +23,11 @@ limitations under the License.
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using namespace ops; // NOLINT(build/namespaces)
using ops::internal::MirrorPadGrad;
namespace {
class ArrayGradTest : public ::testing::Test {
protected:
ArrayGradTest() : scope_(Scope::NewRootScope()) {}


@ -23,10 +23,13 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace {
using ops::Const;
using ops::DynamicPartition;
using ops::DynamicStitch;
using ops::Placeholder;
class DataFlowGradTest : public ::testing::Test {
protected:
DataFlowGradTest() : scope_(Scope::NewRootScope()) {}


@ -18,16 +18,14 @@ limitations under the License.
#include "tensorflow/cc/framework/grad_op_registry.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace test {
Status CallGradFunction(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
GradFunc grad_fn;
TF_RETURN_IF_ERROR(
GradOpRegistry::Global()->Lookup(op.node()->type_string(), &grad_fn));
ops::GradFunc grad_fn;
TF_RETURN_IF_ERROR(ops::GradOpRegistry::Global()->Lookup(
op.node()->type_string(), &grad_fn));
TF_RETURN_IF_ERROR(grad_fn(scope, op, grad_inputs, grad_outputs));
TF_RETURN_IF_ERROR(scope.status());
return Status::OK();


@ -23,10 +23,31 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace {
using ops::Abs;
using ops::Add;
using ops::AddN;
using ops::BatchMatMul;
using ops::Const;
using ops::Div;
using ops::Greater;
using ops::MatMul;
using ops::Max;
using ops::Maximum;
using ops::Mean;
using ops::Min;
using ops::Minimum;
using ops::Mul;
using ops::Placeholder;
using ops::Pow;
using ops::Prod;
using ops::RealDiv;
using ops::SquaredDifference;
using ops::Sub;
using ops::Sum;
using ops::Where3;
// TODO(andydavis) Test gradient function against numeric gradients output.
// TODO(andydavis) As more gradients are added move common test functions
// to a testutil library.
@ -83,6 +104,7 @@ class CWiseUnaryGradTest : public ::testing::Test {
Output y;
switch (op_type) {
using namespace ops; // NOLINT(build/namespaces)
case ABS:
y = Abs(scope_, x);
break;


@ -23,10 +23,22 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace {
using ops::BiasAdd;
using ops::Conv2D;
using ops::Elu;
using ops::L2Loss;
using ops::LogSoftmax;
using ops::LRN;
using ops::MaxPool;
using ops::MaxPoolV2;
using ops::Placeholder;
using ops::Relu;
using ops::Relu6;
using ops::Selu;
using ops::Softmax;
class NNGradTest : public ::testing::Test {
protected:
NNGradTest() : scope_(Scope::NewRootScope()) {}


@ -43,8 +43,6 @@ limitations under the License.
namespace tensorflow {
using strings::StrCat;
// from graph_partition.cc
extern Status TopologicalSortNodesWithTimePriority(
const GraphDef* gdef, std::vector<std::pair<const NodeDef*, int64>>* nodes,
@ -52,6 +50,14 @@ extern Status TopologicalSortNodesWithTimePriority(
namespace {
using ops::_Recv;
using ops::_Send;
using ops::Const;
using ops::Identity;
using ops::LoopCond;
using ops::NextIteration;
using strings::StrCat;
const char gpu_device[] = "/job:a/replica:0/task:0/device:GPU:0";
string SplitByDevice(const Node* node) { return node->assigned_device_name(); }
@ -232,7 +238,6 @@ class GraphPartitionTest : public ::testing::Test {
};
TEST_F(GraphPartitionTest, SingleDevice) {
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
auto a1 = FloatInput(in_.WithOpName("A1"));
Combine(in_.WithOpName("A2"), a1, a1);
@ -245,7 +250,6 @@ TEST_F(GraphPartitionTest, SingleDevice) {
}
TEST_F(GraphPartitionTest, CrossDeviceData) {
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2"), a1, b1);
@ -267,7 +271,6 @@ TEST_F(GraphPartitionTest, CrossDeviceData) {
}
TEST_F(GraphPartitionTest, CrossDeviceControl) {
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2").WithControlDependencies(a1), b1, b1);
@ -291,7 +294,6 @@ TEST_F(GraphPartitionTest, CrossDeviceControl) {
}
TEST_F(GraphPartitionTest, CrossDeviceData_MultiUse) {
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2"), a1, b1);
@ -315,7 +317,6 @@ TEST_F(GraphPartitionTest, CrossDeviceData_MultiUse) {
}
TEST_F(GraphPartitionTest, CrossDeviceControl_MultiUse) {
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2").WithControlDependencies(a1), b1, b1);
@ -341,7 +342,6 @@ TEST_F(GraphPartitionTest, CrossDeviceControl_MultiUse) {
}
TEST_F(GraphPartitionTest, CrossDevice_DataControl) {
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
auto a1 = FloatInput(in_.WithOpName("A1"));
auto b1 = FloatInput(in_.WithOpName("B1"));
Combine(in_.WithOpName("B2"), a1, b1);
@ -372,7 +372,6 @@ TEST_F(GraphPartitionTest, CrossDevice_DataControl) {
}
TEST_F(GraphPartitionTest, CrossDeviceLoopSimple) {
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
auto a1 = BoolInput(in_.WithOpName("A1"));
auto a2 = ::tensorflow::ops::internal::Enter(in_.WithOpName("A2"), a1, "foo");
auto a3 = ::tensorflow::ops::Merge(in_.WithOpName("A3"),
@ -386,7 +385,6 @@ TEST_F(GraphPartitionTest, CrossDeviceLoopSimple) {
}
TEST_F(GraphPartitionTest, CrossDeviceLoopSimple1) {
using namespace ::tensorflow::ops; // NOLINT(build/namespaces)
auto a1 = BoolInput(in_.WithOpName("A1"));
auto a2 = ::tensorflow::ops::internal::Enter(in_.WithOpName("B2"), a1, "foo");
auto a3 = ::tensorflow::ops::Merge(in_.WithOpName("A3"),


@ -32,8 +32,8 @@ limitations under the License.
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace ops {
namespace {
TEST(DecodeWavOpTest, DecodeWavTest) {
Scope root = Scope::NewRootScope();
@ -121,4 +121,6 @@ TEST(DecodeWavOpTest, DecodeWav_ShapeFn) {
INFER_ERROR("channels must be non-negative, got -2", op, "[]");
}
} // namespace
} // namespace ops
} // namespace tensorflow


@ -31,8 +31,8 @@ limitations under the License.
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace ops {
namespace {
TEST(EncodeWavOpTest, EncodeWavTest) {
Scope root = Scope::DisabledShapeInferenceScope();
@ -77,4 +77,6 @@ TEST(EncodeWavOpTest, EncodeWavTest) {
EXPECT_EQ(44100, sample_rate);
}
} // namespace
} // namespace ops
} // namespace tensorflow


@ -31,8 +31,8 @@ limitations under the License.
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace ops {
namespace {
TEST(MfccOpTest, SimpleTest) {
Scope root = Scope::DisabledShapeInferenceScope();
@ -74,4 +74,6 @@ TEST(MfccOpTest, SimpleTest) {
1e-3);
}
} // namespace
} // namespace ops
} // namespace tensorflow


@ -32,9 +32,7 @@ limitations under the License.
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace ops {
namespace {
void TestAdd(const std::vector<int64>& x_shape,
@ -184,8 +182,6 @@ void TimeAdd(const std::vector<int64>& x_shape,
<< ", total_duration=" << total_duration;
}
} // namespace
void TestManualScalar() {
TestAdd(
{10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
@ -276,10 +272,12 @@ void BenchmarkVectorPlusTensor() {
TimeAdd({100000, 100}, {100}, 1);
}
} // end namespace tensorflow
} // namespace
} // namespace ops
} // namespace tensorflow
#define RUN_TEST(t) \
TEST(QuantizedAddOpTest, t) { tensorflow::t(); }
TEST(QuantizedAddOpTest, t) { tensorflow::ops::t(); }
RUN_TEST(TestManualScalar);
RUN_TEST(TestManualVector);


@ -22,6 +22,8 @@ limitations under the License.
#include "tensorflow/core/framework/tensor_testutil.h"
namespace tensorflow {
namespace ops {
namespace {
void ReferenceImpl(const quint8* inp, float inp_min, float inp_max,
const TensorShape& shape, float var_eps, float* out) {
@ -78,10 +80,6 @@ void ReferenceImpl(const quint8* inp, float inp_min, float inp_max,
}
}
using namespace ops; // NOLINT(build/namespaces)
namespace {
void Expect(const Tensor& input, float x_min, float x_max,
bool output_range_given, float give_y_min, float given_y_max) {
Scope root = Scope::NewRootScope();
@ -123,8 +121,6 @@ void Expect(const Tensor& input, float x_min, float x_max,
LOG(INFO) << "max diff " << max_diff();
}
} // end namespace
void TestBasic() {
Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32});
auto input = input_tensor.flat<quint8>();
@ -173,10 +169,12 @@ void TestClamp() {
Expect(input_tensor, -10.0f, 10.0f, true, 0.0f, 1.0f);
}
} // end namespace tensorflow
} // namespace
} // namespace ops
} // namespace tensorflow
#define RUN_TEST(t) \
TEST(QuantizedAddOpTest, t) { tensorflow::t(); }
TEST(QuantizedInstanceNormTest, t) { tensorflow::ops::t(); }
RUN_TEST(TestBasic);
RUN_TEST(TestZeroInput);


@ -32,9 +32,7 @@ limitations under the License.
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace ops {
namespace {
void TestMul(const std::vector<int64>& x_shape,
@ -184,8 +182,6 @@ void TimeMul(const std::vector<int64>& x_shape,
<< ", total_duration=" << total_duration;
}
} // namespace
void TestManualScalar() {
TestMul(
{10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
@ -276,10 +272,12 @@ void BenchmarkVectorTimesTensor() {
TimeMul({100000, 100}, {100}, 100);
}
} // end namespace tensorflow
} // namespace
} // namespace ops
} // namespace tensorflow
#define RUN_TEST(t) \
TEST(QuantizedAddOpTest, t) { tensorflow::t(); }
TEST(QuantizedAddOpTest, t) { tensorflow::ops::t(); }
RUN_TEST(TestManualScalar);
RUN_TEST(TestManualVector);


@ -31,8 +31,8 @@ limitations under the License.
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using namespace ops; // NOLINT(build/namespaces)
namespace ops {
namespace {
TEST(SpectrogramOpTest, SimpleTest) {
Scope root = Scope::NewRootScope();
@ -101,4 +101,6 @@ TEST(SpectrogramOpTest, SquaredTest) {
test::AsTensor<float>({0, 1, 4, 1, 0}, TensorShape({1, 1, 5})), 1e-3);
}
} // namespace
} // namespace ops
} // namespace tensorflow