Remove hourglass imports from kernel_tests
Change: 142080137
parent 38a664cd96
commit 5866e065bc
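Context for the diff below: an "hourglass" import is a dependency that goes through the wide top of the package hierarchy (import tensorflow as tf) even though the code only needs one or two internal modules, which forces each BUILD target to depend on all of //tensorflow:tensorflow_py. This commit swaps such call sites to direct module imports and lists the matching fine-grained deps in the BUILD files that follow. A minimal sketch of the pattern, assuming the TF-0.12-era concat_v2 API that appears later in this diff (the tensor values are illustrative, not from the commit):

    import tensorflow as tf
    from tensorflow.python.ops import array_ops

    fw_out = tf.zeros([2, 5, 3])
    bw_out = tf.zeros([2, 5, 3])

    # Hourglass style: the op is reached through the top-level package, so
    # the owning BUILD target must depend on all of //tensorflow:tensorflow_py.
    merged_old = tf.concat_v2([fw_out, bw_out], 2)

    # Fine-grained style: import the module that owns the op, matching a
    # narrow BUILD dep such as "//tensorflow/python:array_ops".
    merged_new = array_ops.concat_v2([fw_out, bw_out], 2)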
@@ -15,6 +15,18 @@ py_library(
    name = "bayesflow_py",
    srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/contrib/distributions:distributions_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:check_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn",
        "//tensorflow/python:platform",
        "//tensorflow/python:training",
        "//tensorflow/python:util",
        "//tensorflow/python:variable_scope",
    ],
)

cuda_py_test(

@@ -15,6 +15,11 @@ py_library(
        "python/util/copy_elements.py",
    ],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:session",
        "//tensorflow/python:variables",
    ],
)

py_test(
@@ -24,6 +29,7 @@ py_test(
    deps = [
        ":copy_graph_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/framework:framework_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],

@@ -14,6 +14,13 @@ py_library(
    name = "crf_py",
    srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:rnn",
        "//tensorflow/python:variable_scope",
    ],
)

cuda_py_tests(

@@ -51,6 +51,14 @@ py_library(
    visibility = ["//visibility:public"],
    deps = [
        ":cudnn_rnn_ops",
        "//tensorflow/contrib/util:util_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:platform",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:training",
    ],
)

@@ -61,8 +69,10 @@ cuda_py_test(
    additional_deps = [
        ":cudnn_rnn_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:variables",
    ],
    tags = [
        "manual",

@@ -17,6 +17,7 @@ cuda_py_tests(
    additional_deps = [
        ":distributions_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
@@ -100,6 +101,22 @@ py_library(
    name = "distributions_py",
    srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/contrib/framework:framework_py",
        "//tensorflow/contrib/linalg:linalg_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:check_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:data_flow_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:linalg_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:random_ops",
        "//tensorflow/python:special_math_ops",
    ],
)

cuda_py_tests(
@@ -142,6 +159,7 @@ cuda_py_tests(
    additional_deps = [
        ":distributions_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
@@ -324,6 +342,7 @@ cuda_py_tests(
    size = "small",
    srcs = ["python/kernel_tests/kullback_leibler_test.py"],
    additional_deps = [
        ":distributions_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:platform_test",
    ],
@@ -369,6 +388,7 @@ cuda_py_tests(
    additional_deps = [
        ":distributions_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
@@ -381,6 +401,7 @@ cuda_py_tests(
    additional_deps = [
        ":distributions_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],

@@ -30,6 +30,13 @@ py_library(
    deps = [
        ":gen_clustering_ops",
        ":gen_factorization_ops",
        "//tensorflow/contrib/learn",
        "//tensorflow/contrib/util:util_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:embedding_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:platform",
    ],
)

@@ -105,8 +112,10 @@ tf_py_test(
        "python/ops/gmm_ops_test.py",
    ],
    additional_deps = [
        ":factorization_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform",
        "//tensorflow/python:platform_test",
    ],
)

@@ -121,6 +121,10 @@ py_library(
    deps = [
        ":decode_audio_op_py",
        ":encode_audio_op_py",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:platform",
    ],
)

@@ -32,6 +32,23 @@ py_library(
    srcs_version = "PY2AND3",
    deps = [
        ":gen_variable_ops",
        "//tensorflow/contrib/util:util_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:init_ops",
        "//tensorflow/python:io_ops",
        "//tensorflow/python:logging_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:state_ops_gen",
        "//tensorflow/python:tensor_array_ops",
        "//tensorflow/python:training",
        "//tensorflow/python:util",
        "//tensorflow/python:variable_scope",
        "//tensorflow/python:variables",
    ],
)

@@ -104,7 +121,11 @@ py_test(
    name = "experimental_test",
    srcs = ["python/framework/experimental_test.py"],
    srcs_version = "PY2AND3",
    deps = ["//tensorflow:tensorflow_py"],
    deps = [
        ":framework_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:platform",
    ],
)

py_test(

@@ -20,6 +20,10 @@ py_library(
        "util.py",
    ],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
    ],
)

filegroup(

@@ -14,6 +14,12 @@ py_library(
    name = "grid_rnn_py",
    srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn",
        "//tensorflow/python:variable_scope",
    ],
)

cuda_py_tests(

@@ -45,7 +45,15 @@ py_library(
    ],
    data = [":python/ops/_image_ops.so"],
    srcs_version = "PY2AND3",
    deps = [":image_ops"],
    deps = [
        ":image_ops",
        "//tensorflow/contrib/util:util_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform",
    ],
)

cuda_py_test(

@@ -55,7 +55,14 @@ py_library(
    srcs = glob(["python/ops/*.py"]),
    data = [":python/ops/_input_pipeline_ops.so"],
    srcs_version = "PY2AND3",
    deps = [":input_pipeline_ops"],
    deps = [
        ":input_pipeline_ops",
        "//tensorflow/contrib/util:util_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform",
        "//tensorflow/python:variable_scope",
    ],
)

py_test(
@@ -68,6 +75,8 @@ py_test(
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:variables",
    ],
)

@@ -14,6 +14,14 @@ py_library(
        "python/ops/odes.py",
    ],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:tensor_array_ops",
        "//tensorflow/python:util",
    ],
)

py_test(

@@ -33,6 +33,9 @@ py_library(
    srcs_version = "PY2AND3",
    deps = [
        ":_typecheck",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
    ],
)

@@ -55,6 +58,7 @@ py_test(
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":_typecheck",
        ":core",
        ":test_util",
        "//tensorflow:tensorflow_py",
@@ -66,7 +70,11 @@ py_library(
    srcs = ["python/ops/io_ops.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":_typecheck",
        ":core",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:parsing_ops",
    ],
)

@@ -78,6 +86,7 @@ py_test(
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":core",
        ":io_ops",
        ":ops",
        ":test_util",
@@ -91,6 +100,7 @@ py_library(
    srcs_version = "PY2AND3",
    deps = [
        ":core",
        "//tensorflow/python:nn",
    ],
)

@@ -102,6 +112,7 @@ py_test(
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":core",
        ":nn",
        ":test_util",
        "//tensorflow:tensorflow_py",
@@ -113,7 +124,14 @@ py_library(
    srcs = ["python/ops/ops.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":_typecheck",
        ":core",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:numerics",
        "//tensorflow/python:random_ops",
        "//tensorflow/python:training",
    ],
)

@@ -125,6 +143,7 @@ py_test(
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":core",
        ":ops",
        ":test_util",
        "//tensorflow:tensorflow_py",
@@ -136,8 +155,10 @@ py_library(
    srcs = ["python/ops/sugar.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":_typecheck",
        ":core",
        ":ops",
        "//tensorflow/python:framework_for_generated_wrappers",
    ],
)

@@ -149,6 +170,8 @@ py_test(
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":core",
        ":ops",
        ":sugar",
        ":test_util",
        "//tensorflow:tensorflow_py",

@@ -99,6 +99,33 @@ py_library(
    deps = [
        ":bucketization_op",
        ":sparse_feature_cross_op",
        "//tensorflow/contrib/framework:framework_py",
        "//tensorflow/contrib/lookup:lookup_py",
        "//tensorflow/contrib/losses:losses_py",
        "//tensorflow/contrib/metrics:metrics_py",
        "//tensorflow/contrib/util:util_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:check_ops",
        "//tensorflow/python:clip_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:embedding_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:init_ops",
        "//tensorflow/python:layers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:parsing_ops",
        "//tensorflow/python:platform",
        "//tensorflow/python:random_ops",
        "//tensorflow/python:sparse_ops",
        "//tensorflow/python:standard_ops",
        "//tensorflow/python:string_ops",
        "//tensorflow/python:training",
        "//tensorflow/python:util",
        "//tensorflow/python:variable_scope",
        "//tensorflow/python:variables",
    ],
)

@@ -109,6 +136,7 @@ cuda_py_test(
    additional_deps = [
        ":layers_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
@@ -187,6 +215,7 @@ py_test(
        ":layers_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:init_ops",
        "//tensorflow/python:platform_test",
    ],
)

@@ -20,9 +20,9 @@ from __future__ import print_function

import six

from tensorflow.contrib import losses
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
@@ -31,8 +31,7 @@ from tensorflow.python.ops import nn


@deprecated(
    "2016-11-12",
    "This file will be removed after the deprecation date."
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def regression_target(label_name=None,
@@ -51,17 +50,18 @@ def regression_target(label_name=None,
  Returns:
    An instance of _TargetColumn
  """
  return _RegressionTargetColumn(loss_fn=_mean_squared_loss,
                                 label_name=label_name,
                                 weight_column_name=weight_column_name,
                                 label_dimension=label_dimension)
  return _RegressionTargetColumn(
      loss_fn=_mean_squared_loss,
      label_name=label_name,
      weight_column_name=weight_column_name,
      label_dimension=label_dimension)


# TODO(zakaria): Add logistic_regression_target


@deprecated(
    "2016-11-12",
    "This file will be removed after the deprecation date."
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def multi_class_target(n_classes, label_name=None, weight_column_name=None):
@@ -89,15 +89,15 @@ def multi_class_target(n_classes, label_name=None, weight_column_name=None):
    loss_fn = _log_loss_with_two_classes
  else:
    loss_fn = _softmax_cross_entropy_loss
  return _MultiClassTargetColumn(loss_fn=loss_fn,
                                 n_classes=n_classes,
                                 label_name=label_name,
                                 weight_column_name=weight_column_name)
  return _MultiClassTargetColumn(
      loss_fn=loss_fn,
      n_classes=n_classes,
      label_name=label_name,
      weight_column_name=weight_column_name)


@deprecated(
    "2016-11-12",
    "This file will be removed after the deprecation date."
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def binary_svm_target(label_name=None, weight_column_name=None):
@@ -116,13 +116,12 @@ def binary_svm_target(label_name=None, weight_column_name=None):
    An instance of _TargetColumn.

  """
  return _BinarySvmTargetColumn(label_name=label_name,
                                weight_column_name=weight_column_name)
  return _BinarySvmTargetColumn(
      label_name=label_name, weight_column_name=weight_column_name)


@deprecated(
    "2016-11-12",
    "This file will be removed after the deprecation date."
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
class ProblemType(object):
@@ -148,8 +147,8 @@ class _TargetColumn(object):
    ValueError: if loss_fn or n_classes are missing.
  """

  def __init__(self, loss_fn, num_label_columns, label_name,
               weight_column_name, problem_type):
  def __init__(self, loss_fn, num_label_columns, label_name, weight_column_name,
               problem_type):
    if not loss_fn:
      raise ValueError("loss_fn must be provided")
    if num_label_columns is None:  # n_classes can be 0
@@ -186,8 +185,7 @@ class _TargetColumn(object):
      return None
    else:
      return array_ops.reshape(
          math_ops.to_float(features[self._weight_column_name]),
          shape=(-1,))
          math_ops.to_float(features[self._weight_column_name]), shape=(-1,))

  @property
  def problem_type(self):
@@ -254,10 +252,9 @@ class _TargetColumn(object):
    if weight_tensor is None:
      return math_ops.reduce_mean(loss_unweighted, name="loss")
    loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
    return math_ops.div(
        math_ops.reduce_sum(loss_weighted),
        math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
        name="loss")
    return math_ops.div(math_ops.reduce_sum(loss_weighted),
                        math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
                        name="loss")


class _RegressionTargetColumn(_TargetColumn):
@@ -278,11 +275,12 @@ class _RegressionTargetColumn(_TargetColumn):

  def get_eval_ops(self, features, logits, labels, metrics=None):
    loss = self.loss(logits, labels, features)
    result = {"loss": metrics_lib.streaming_mean(loss)}
    result = {"loss": metric_ops.streaming_mean(loss)}
    if metrics:
      predictions = self.logits_to_predictions(logits, proba=False)
      result.update(_run_metrics(predictions, labels, metrics,
                                 self.get_weight_tensor(features)))
      result.update(
          _run_metrics(predictions, labels, metrics,
                       self.get_weight_tensor(features)))
    return result


@@ -316,13 +314,13 @@ class _MultiClassTargetColumn(_TargetColumn):

  def get_eval_ops(self, features, logits, labels, metrics=None):
    loss = self.loss(logits, labels, features)
    result = {"loss": metrics_lib.streaming_mean(loss)}
    result = {"loss": metric_ops.streaming_mean(loss)}

    # Adds default metrics.
    if metrics is None:
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy}
      metrics = {("accuracy", "classes"): metric_ops.streaming_accuracy}

    predictions = math_ops.sigmoid(logits)
    labels_float = math_ops.to_float(labels)
@@ -354,12 +352,14 @@ class _MultiClassTargetColumn(_TargetColumn):
                       "form.".format(name))
    if class_metrics:
      class_predictions = self.logits_to_predictions(logits, proba=False)
      result.update(_run_metrics(class_predictions, labels, class_metrics,
                                 self.get_weight_tensor(features)))
      result.update(
          _run_metrics(class_predictions, labels, class_metrics,
                       self.get_weight_tensor(features)))
    if proba_metrics:
      predictions = self.logits_to_predictions(logits, proba=True)
      result.update(_run_metrics(predictions, labels, proba_metrics,
                                 self.get_weight_tensor(features)))
      result.update(
          _run_metrics(predictions, labels, proba_metrics,
                       self.get_weight_tensor(features)))
    return result


@@ -367,6 +367,7 @@ class _BinarySvmTargetColumn(_MultiClassTargetColumn):
  """_TargetColumn for binary classification using SVMs."""

  def __init__(self, label_name, weight_column_name):

    def loss_fn(logits, target):
      check_shape_op = control_flow_ops.Assert(
          math_ops.less_equal(array_ops.rank(target), 2),
@@ -374,7 +375,7 @@ class _BinarySvmTargetColumn(_MultiClassTargetColumn):
      with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(
            target, shape=[array_ops.shape(target)[0], 1])
      return losses.hinge_loss(logits, target)
      return loss_ops.hinge_loss(logits, target)

    super(_BinarySvmTargetColumn, self).__init__(
        loss_fn=loss_fn,
@@ -435,8 +436,7 @@ def _run_metrics(predictions, labels, metrics, weights):


@deprecated(
    "2016-11-12",
    "This file will be removed after the deprecation date."
    "2016-11-12", "This file will be removed after the deprecation date."
    "Please switch to "
    "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py")
def get_default_binary_metrics_for_eval(thresholds):
@@ -459,14 +459,14 @@ def get_default_binary_metrics_for_eval(thresholds):
  metrics[_MetricKeys.AUC] = _streaming_auc

  for threshold in thresholds:
    metrics[_MetricKeys.ACCURACY_MEAN % threshold] = _accuracy_at_threshold(
        threshold)
    metrics[_MetricKeys.ACCURACY_MEAN %
            threshold] = _accuracy_at_threshold(threshold)
    # Precision for positive examples.
    metrics[_MetricKeys.PRECISION_MEAN % threshold] = _streaming_at_threshold(
        metrics_lib.streaming_precision_at_thresholds, threshold)
        metric_ops.streaming_precision_at_thresholds, threshold)
    # Recall for positive examples.
    metrics[_MetricKeys.RECALL_MEAN % threshold] = _streaming_at_threshold(
        metrics_lib.streaming_recall_at_thresholds, threshold)
        metric_ops.streaming_recall_at_thresholds, threshold)

  return metrics

@@ -478,16 +478,16 @@ def _float_weights_or_none(weights):


def _labels_streaming_mean(unused_predictions, labels, weights=None):
  return metrics_lib.streaming_mean(labels, weights=weights)
  return metric_ops.streaming_mean(labels, weights=weights)


def _predictions_streaming_mean(predictions, unused_labels, weights=None):
  return metrics_lib.streaming_mean(predictions, weights=weights)
  return metric_ops.streaming_mean(predictions, weights=weights)


def _streaming_auc(predictions, labels, weights=None):
  return metrics_lib.streaming_auc(predictions, labels,
                                   weights=_float_weights_or_none(weights))
  return metric_ops.streaming_auc(
      predictions, labels, weights=_float_weights_or_none(weights))


def _accuracy_at_threshold(threshold):
@@ -495,9 +495,8 @@ def _accuracy_at_threshold(threshold):
  def _accuracy_metric(predictions, labels, weights=None):
    threshold_predictions = math_ops.to_float(
        math_ops.greater_equal(predictions, threshold))
    return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
                                          labels=labels,
                                          weights=weights)
    return metric_ops.streaming_accuracy(
        predictions=threshold_predictions, labels=labels, weights=weights)

  return _accuracy_metric

@@ -506,7 +505,9 @@ def _streaming_at_threshold(streaming_metrics_fn, threshold):

  def _streaming_metrics(predictions, labels, weights=None):
    precision_tensor, update_op = streaming_metrics_fn(
        predictions, labels=labels, thresholds=[threshold],
        predictions,
        labels=labels,
        thresholds=[threshold],
        weights=_float_weights_or_none(weights))
    return array_ops.squeeze(precision_tensor), update_op

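For reference on the decorator reflowed throughout the file above: deprecated from tensorflow.contrib.framework takes a date string and an instructions string and logs a deprecation warning when the wrapped callable runs. A hedged sketch of its use, assuming the contrib API of this era (the function name and body below are illustrative, not taken from the diff):

    from tensorflow.contrib.framework import deprecated

    @deprecated("2016-11-12", "Please switch to estimators/head.py.")
    def regression_target_example(label_name=None, weight_column_name=None):
      # The body runs unchanged; the decorator only emits a deprecation
      # warning and annotates the docstring.
      return (label_name, weight_column_name)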
@@ -18,12 +18,48 @@ py_library(
    ),
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/contrib/framework:framework_py",
        "//tensorflow/contrib/layers:layers_py",
        "//tensorflow/contrib/learn/python/learn/datasets",
        "//tensorflow/contrib/linear_optimizer:sdca_ops_py",
        "//tensorflow/contrib/losses:losses_py",
        "//tensorflow/contrib/session_bundle:exporter",
        "//tensorflow/contrib/session_bundle:gc",
        "//tensorflow/contrib/tensor_forest:client_lib",
        "//tensorflow/contrib/tensor_forest:data_ops_py",
        "//tensorflow/contrib/tensor_forest:eval_metrics",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:check_ops",
        "//tensorflow/python:clip_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:data_flow_ops",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:functional_ops",
        "//tensorflow/python:gradients",
        "//tensorflow/python:init_ops",
        "//tensorflow/python:io_ops",
        "//tensorflow/python:logging_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn",
        "//tensorflow/python:parsing_ops",
        "//tensorflow/python:partitioned_variables",
        "//tensorflow/python:platform",
        "//tensorflow/python:resources",
        "//tensorflow/python:rnn",
        "//tensorflow/python:session",
        "//tensorflow/python:sparse_ops",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:string_ops",
        "//tensorflow/python:training",
        "//tensorflow/python:util",
        "//tensorflow/python:variable_scope",
        "//tensorflow/python:variables",
        "//tensorflow/python/saved_model:builder",
        "//tensorflow/python/saved_model:loader",
        "//tensorflow/python/saved_model:signature_constants",
        "//tensorflow/python/saved_model:signature_def_utils",
        "//tensorflow/python/saved_model:tag_constants",
    ],
@@ -89,6 +125,7 @@ py_test(
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
    ],
)
@@ -126,6 +163,7 @@ py_test(
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
@@ -200,6 +238,7 @@ py_test(
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
    ],
)
@@ -285,6 +324,7 @@ py_test(
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:util",
    ],
)

@@ -297,8 +337,12 @@ py_test(
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:extra_py_tests_deps",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:resources",
        "//tensorflow/python:test_ops",
        "//tensorflow/python:variables",
    ],
)

@@ -323,6 +367,8 @@ py_test(
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform",
        "//tensorflow/python:training",
    ],
)

@@ -358,7 +404,11 @@ py_test(
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:util",
        "//tensorflow/python/saved_model:loader",
        "//tensorflow/python/saved_model:tag_constants",
    ],
)

@@ -426,6 +476,7 @@ py_test(
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:math_ops",
    ],
)

@@ -454,6 +505,7 @@ py_test(
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_benchmark",
    ],
)

@@ -502,7 +554,10 @@ py_test(
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/framework:framework_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:state_ops",
    ],
)

@@ -646,8 +701,11 @@ py_test(
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform",
    ],
)

@@ -659,6 +717,7 @@ py_test(
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework_test_lib",
    ],
)
@@ -671,6 +730,7 @@ py_test(
    deps = [
        ":learn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework_test_lib",
    ],
)
@@ -701,6 +761,7 @@ py_test(
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform",
    ],
)

@@ -714,6 +775,8 @@ py_test(
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python/saved_model:signature_constants",
        "//tensorflow/python/saved_model:signature_def_utils",
    ],
)

@@ -22,6 +22,10 @@ py_library(
    ],
    data = [":data_csv"],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:platform",
    ],
)

py_binary(
@@ -54,6 +58,7 @@ py_test(
    srcs = ["base_test.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":datasets",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/learn",
        "//tensorflow/python:framework_test_lib",

@@ -21,6 +21,18 @@ py_library(
    ),
    srcs_version = "PY2AND3",
    visibility = ["//visibility:public"],
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:embedding_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:rnn",
        "//tensorflow/python:rnn_cell",
        "//tensorflow/python:util",
        "//tensorflow/python:variable_scope",
    ],
)

cuda_py_tests(

@@ -93,6 +93,15 @@ py_library(
    name = "linalg_py",
    srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:check_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:linalg_ops",
        "//tensorflow/python:math_ops",
    ],
)

filegroup(

@@ -19,6 +19,15 @@ py_library(
    deps = [
        ":sharded_mutable_dense_hashtable_py",
        ":sparse_feature_column_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:sdca_ops_gen",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:variables",
    ],
)

@@ -29,9 +38,11 @@ py_test(
    srcs_version = "PY2AND3",
    deps = [
        ":sdca_ops_py",
        ":sparse_feature_column_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:sdca_ops_gen",
    ],
)

@@ -41,6 +52,11 @@ py_library(
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/contrib/lookup:lookup_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:data_flow_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
    ],
)

@@ -61,6 +77,7 @@ py_library(
    name = "sparse_feature_column_py",
    srcs = ["python/ops/sparse_feature_column.py"],
    srcs_version = "PY2AND3",
    deps = ["//tensorflow/python:framework_for_generated_wrappers"],
)

py_test(

@@ -14,6 +14,17 @@ py_library(
        "lookup_ops.py",
    ],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:data_flow_ops_gen",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:string_ops",
        "//tensorflow/python:training",
        "//tensorflow/python:util",
    ],
)

py_test(

@@ -15,6 +15,15 @@ py_library(
        "python/losses/loss_ops.py",
    ],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/contrib/framework:framework_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:util",
    ],
)

py_test(

@@ -21,9 +21,25 @@ py_library(
    ],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/contrib/framework:framework_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:check_ops",
        "//tensorflow/python:confusion_matrix",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:histogram_ops",
        "//tensorflow/python:init_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:metrics",
        "//tensorflow/python:nn",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:sets",
        "//tensorflow/python:sparse_ops",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:util",
        "//tensorflow/python:variable_scope",
        "//tensorflow/python:variables",
    ],
)

@@ -43,6 +43,7 @@ tf_py_test(
    additional_deps = [
        ":ndlstm",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
    ],
)

@@ -52,6 +53,7 @@ tf_py_test(
    additional_deps = [
        ":ndlstm",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        # "//tensorflow:tensorflow_py:tensorflow_google",
    ],
)

@@ -16,6 +16,16 @@ py_library(
        "python/training/variable_clipping_optimizer.py",
    ],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:clip_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:gradients",
        "//tensorflow/python:platform",
        "//tensorflow/python:training",
        "//tensorflow/python:variables",
    ],
)

py_test(

@@ -22,6 +22,7 @@ py_library(
    srcs_version = "PY2AND3",
    deps = [
        ":ops",
        "//tensorflow/python:array_ops_gen",
    ],
)

@@ -35,8 +36,13 @@ py_library(
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:array_ops_gen",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:math_ops_gen",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:nn_ops_gen",
    ],
)

@@ -31,6 +31,21 @@ py_library(
    ],
    srcs_version = "PY2AND3",
    visibility = ["//visibility:public"],
    deps = [
        "//tensorflow/contrib/layers:layers_py",
        "//tensorflow/contrib/util:util_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:clip_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:init_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:platform",
        "//tensorflow/python:rnn",
        "//tensorflow/python:rnn_cell",
        "//tensorflow/python:util",
        "//tensorflow/python:variable_scope",
    ],
)

cuda_py_tests(
@@ -51,6 +66,7 @@ cuda_py_tests(
    srcs = ["python/kernel_tests/core_rnn_cell_test.py"],
    additional_deps = [
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:rnn_cell",
    ],
)

@@ -72,6 +88,7 @@ cuda_py_tests(
    srcs = ["python/kernel_tests/core_rnn_test.py"],
    additional_deps = [
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:util",
    ],
    shard_count = 10,
)
@@ -147,6 +164,7 @@ cuda_py_tests(
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:variable_scope",
    ],
)

@@ -12,13 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import variable_scope as vs


@@ -105,7 +105,7 @@ def stack_bidirectional_rnn(cells_fw,
      initial_state_bw = initial_states_bw[i]

    with vs.variable_scope("cell_%d" % i) as cell_scope:
      prev_layer, state_fw, state_bw = tf.nn.bidirectional_rnn(
      prev_layer, state_fw, state_bw = rnn.bidirectional_rnn(
          cell_fw,
          cell_bw,
          prev_layer,
@@ -203,7 +203,7 @@ def stack_bidirectional_dynamic_rnn(cells_fw,
      initial_state_bw = initial_states_bw[i]

    with vs.variable_scope("cell_%d" % i):
      outputs, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
      outputs, (state_fw, state_bw) = rnn.bidirectional_dynamic_rnn(
          cell_fw,
          cell_bw,
          prev_layer,
@@ -212,7 +212,7 @@ def stack_bidirectional_dynamic_rnn(cells_fw,
          sequence_length=sequence_length,
          dtype=dtype)
      # Concat the outputs to create the new input.
      prev_layer = tf.concat_v2(outputs, 2)
      prev_layer = array_ops.concat_v2(outputs, 2)
    states_fw.append(state_fw)
    states_bw.append(state_bw)

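The concat_v2 call above joins the forward and backward RNN outputs on the feature axis before they feed the next stacked layer. A toy numpy stand-in for that step (shapes illustrative, not taken from the diff):

    import numpy as np

    batch, time, units = 2, 5, 3
    output_fw = np.zeros((batch, time, units))
    output_bw = np.ones((batch, time, units))

    # Same effect as array_ops.concat_v2(outputs, 2) on a (fw, bw) pair:
    prev_layer = np.concatenate((output_fw, output_bw), axis=2)
    assert prev_layer.shape == (batch, time, units * 2)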
@@ -13,6 +13,17 @@ py_library(
    name = "seq2seq_py",
    srcs = ["__init__.py"] + glob(["python/ops/*.py"]),
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:rnn",
        "//tensorflow/python:rnn_cell",
        "//tensorflow/python:tensor_array_ops",
        "//tensorflow/python:util",
        "//tensorflow/python:variable_scope",
    ],
)

cuda_py_test(

@@ -51,6 +51,7 @@ py_library(
        ":session_bundle_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/core:protos_all_py",
        "//tensorflow/python:framework",
        "//tensorflow/python/saved_model:constants",
        "//tensorflow/python/saved_model:loader",
        "//tensorflow/python/saved_model:signature_constants",
@@ -72,6 +73,12 @@ py_test(
    tags = ["manual"],
    deps = [
        ":bundle_shim_py",
        ":constants",
        "//tensorflow/python:framework",
        "//tensorflow/python:util",
        "//tensorflow/python/saved_model:constants",
        "//tensorflow/python/saved_model:signature_constants",
        "//tensorflow/python/saved_model:tag_constants",
    ],
)

@@ -90,6 +97,10 @@ py_library(
        ":gc",
        ":manifest_proto_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:platform",
        "//tensorflow/python:training",
        "//tensorflow/python:util",
    ],
)

@@ -107,6 +118,7 @@ py_test(
        ":gc",
        ":manifest_proto_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:platform",
    ],
)

@@ -116,6 +128,7 @@ py_library(
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:framework",
        "//tensorflow/python:platform",
    ],
)

@@ -129,6 +142,8 @@ py_test(
    deps = [
        ":gc",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform",
    ],
)

@@ -213,6 +228,7 @@ py_library(
        ":manifest_proto_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/core:protos_all_py",
        "//tensorflow/python:lib",
    ],
)

@@ -234,6 +250,8 @@ py_test(
        ":session_bundle_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/core:protos_all_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:util",
    ],
)

@@ -73,6 +73,15 @@ py_library(
        "data/data_ops.py",
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":constants",
        ":tensor_forest_ops_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:sparse_ops",
    ],
)

tf_gen_op_libs(
@@ -114,7 +123,15 @@ py_library(
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":constants",
        ":gen_tensor_forest_ops",
        "//tensorflow/contrib/util:util_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform",
        "//tensorflow/python:sparse_ops",
    ],
)

@@ -122,6 +139,12 @@ py_library(
    name = "eval_metrics",
    srcs = ["client/eval_metrics.py"],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/contrib/metrics:metrics_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn",
    ],
)

py_test(
@@ -274,6 +297,18 @@ py_library(
        ":constants",
        ":data_ops_py",
        ":tensor_forest_ops_py",
        "//tensorflow/contrib/losses:losses_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:init_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform",
        "//tensorflow/python:random_ops",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:variable_scope",
        "//tensorflow/python:variables",
    ],
)

@@ -326,9 +361,11 @@ py_test(
    srcs_version = "PY2AND3",
    tags = ["manual"],
    deps = [
        ":tensor_forest_ops_py",
        ":topn_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:session",
    ],
)

@@ -82,6 +82,13 @@ py_library(
        "python/ops/_training_ops.so",
    ],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform",
    ],
)

py_library(
@@ -100,6 +107,7 @@ py_library(
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
    ],
)

@@ -114,6 +122,9 @@ py_test(
        ":fully_connected_layer",
        ":hybrid_layer",
        ":hybrid_model",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
)

@@ -125,6 +136,12 @@ py_library(
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:training",
        "//tensorflow/python:variables",
    ],
)

@@ -137,6 +154,8 @@ py_library(
    deps = [
        ":hybrid_layer",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
    ],
)

@@ -161,6 +180,7 @@ py_test(
    deps = [
        ":ops_lib",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
@@ -177,6 +197,10 @@ py_library(
        ":ops_lib",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:init_ops",
        "//tensorflow/python:variable_scope",
    ],
)

@@ -190,6 +214,10 @@ py_test(
        ":decisions_to_data_layer",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:variable_scope",
    ],
)

@@ -206,6 +234,7 @@ py_library(
        ":ops_lib",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:training",
    ],
)

@@ -222,6 +251,8 @@ py_library(
        ":ops_lib",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:nn_ops",
        "//tensorflow/python:training",
    ],
)

@@ -234,6 +265,10 @@ py_test(
        ":decisions_to_data_then_nn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:variable_scope",
    ],
)

@@ -250,6 +285,7 @@ py_library(
        ":ops_lib",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:training",
    ],
)

@@ -262,6 +298,10 @@ py_test(
        ":k_feature_decisions_to_data_then_nn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:variable_scope",
    ],
)

@@ -278,6 +318,7 @@ py_library(
        ":ops_lib",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:training",
    ],
)

@@ -290,6 +331,10 @@ py_test(
        ":forest_to_data_then_nn",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:variable_scope",
    ],
)

@@ -305,6 +350,7 @@ py_library(
        ":ops_lib",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:training",
    ],
)

@@ -322,6 +368,7 @@ py_library(
        ":ops_lib",
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/tensor_forest:tensor_forest_py",
        "//tensorflow/python:training",
    ],
)

@@ -15,6 +15,10 @@ py_library(
        "python/framework/util_test.py",
    ],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:summary",
        "//tensorflow/python:training",
    ],
)

filegroup(

@@ -14,5 +14,6 @@ py_library(
    deps = [
        "//tensorflow/contrib/tfprof/python/tools/tfprof:model_analyzer",
        "//tensorflow/contrib/tfprof/python/tools/tfprof:tfprof_logger",
        "//tensorflow/python:util",
    ],
)

@@ -24,10 +24,27 @@ py_library(
    srcs_version = "PY2AND3",
    visibility = ["//visibility:public"],
    deps = [
        "//tensorflow/contrib/framework:framework_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:check_ops",
        "//tensorflow/python:clip_ops",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:data_flow_ops",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:logging_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:ops",
        "//tensorflow/python:platform",
        "//tensorflow/python:random_ops",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:string_ops",
        "//tensorflow/python:tensor_array_ops",
        "//tensorflow/python:training",
        "//tensorflow/python:util",
        "//tensorflow/python:variable_scope",
        "//tensorflow/python:variables",
    ],
)

@@ -113,6 +130,7 @@ py_test(
        ":training_py",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform",
    ],
)

@@ -66,7 +66,11 @@ py_library(
    name = "util_py",
    srcs = glob(["**/*.py"]),
    srcs_version = "PY2AND3",
    deps = [],
    deps = [
        "//tensorflow/python:framework",
        "//tensorflow/python:platform",
        "//tensorflow/python:util",
    ],
)

filegroup(

@@ -5,7 +5,7 @@
# Public targets:
#
#  ":protos_all" - exports all core TensorFlow protos
#  ":protos_all_py_pb2" - py_proto_library version (Google-internal)
#  ":protos_all_py" - py_proto_library version (Google-internal)
#  ":lib" - exports the public non-test headers for:
#    platform/: Platform-specific code and external dependencies
#    lib/: Low-level libraries that are not TensorFlow-specific

@@ -13,6 +13,7 @@ py_binary(
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/learn/python/learn/datasets",
        "//tensorflow/examples/tutorials/mnist:input_data",
    ],
)

@@ -14,6 +14,10 @@ py_binary(
    visibility = ["//tensorflow:__subpackages__"],
    deps = [
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:platform",
        "//tensorflow/python:util",
    ],
)

@@ -26,6 +30,7 @@ py_test(
    ],
    srcs_version = "PY2AND3",
    deps = [
        ":retrain",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",

@@ -23,7 +23,10 @@ py_library(
    srcs = ["input_data.py"],
    srcs_version = "PY2AND3",
    visibility = ["//visibility:public"],
    deps = ["//tensorflow:tensorflow_py"],
    deps = [
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/learn/python/learn/datasets",
    ],
)

py_library(

@@ -20,6 +20,7 @@ py_binary(
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow:tensorflow_py",
        "//tensorflow/contrib/learn",
    ],
)

@@ -59,6 +59,9 @@ py_library(
    deps = [
        ":zero_out_op_2",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:sparse_ops",
    ],
)

(File diff suppressed because it is too large.)
@ -3,24 +3,27 @@
|
||||
load("//tensorflow:tensorflow.bzl", "tf_gen_op_wrapper_py")
|
||||
|
||||
# Intended only for use within this directory.
|
||||
# Generated python wrappers are private visibility, users should depend on the
|
||||
# Generated python wrappers are "private" visibility, users should depend on the
|
||||
# full python code that incorporates the wrappers. The generated targets have
|
||||
# a _gen suffix, so that the full python version can use the bare name.
|
||||
# We also hard code the hidden_file here to reduce duplication.
|
||||
#
|
||||
# We should consider moving the "out" default pattern into here, many other
|
||||
# consumers of the tf_gen_op_wrapper_py rule would be simplified if we don't
|
||||
# consumers of the tf_gen_op_wrapper_py rule would be simplified if we don't
|
||||
# hard code the ops/ directory.
|
||||
|
||||
def tf_gen_op_wrapper_private_py(name, out=None, deps=[],
|
||||
require_shape_functions=False):
|
||||
require_shape_functions=False,
|
||||
visibility=[]):
|
||||
if not name.endswith("_gen"):
|
||||
fail("name must end in _gen")
|
||||
if not visibility:
|
||||
visibility = ["//visibility:private"]
|
||||
bare_op_name = name[:-4] # Strip of the _gen
|
||||
  tf_gen_op_wrapper_py(name=bare_op_name,
                       out=out,
                       hidden_file="ops/hidden_ops.txt",
                       visibility=["//visibility:private"],
                       visibility=visibility,
                       deps=deps,
                       require_shape_functions=require_shape_functions,
                       generated_target_name=name,
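For context, a minimal sketch of how a BUILD file would invoke this macro after the change. The load path and target name below are illustrative assumptions, not taken from this commit; the only new behavior is that a caller may now pass an explicit visibility, with the default still private:

    load("//tensorflow/python:build_defs.bzl", "tf_gen_op_wrapper_private_py")

    tf_gen_op_wrapper_private_py(
        name = "example_ops_gen",  # hypothetical target; the name must end in _gen
        visibility = ["//tensorflow:internal"],  # new optional arg; omit to keep //visibility:private
    )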
@ -32,6 +32,10 @@ py_library(
    name = "debug_data",
    srcs = ["debug_data.py"],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:framework",
        "//tensorflow/python:platform",
    ],
)

py_library(

@ -50,6 +54,8 @@ py_library(
    deps = [
        ":debug_data",
        "//tensorflow/python:data_flow_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:session_ops",
    ],
)

@ -60,6 +66,7 @@ py_library(
    deps = [
        ":debug_utils",
        ":stepper",
        "//tensorflow/python:errors",
        "//tensorflow/python:session",
    ],
)

@ -68,6 +75,7 @@ py_library(
    name = "debugger_cli_common",
    srcs = ["cli/debugger_cli_common.py"],
    srcs_version = "PY2AND3",
    deps = ["//tensorflow/python:platform"],
)

py_library(

@ -92,6 +100,7 @@ py_library(
        ":debugger_cli_common",
        ":tensor_format",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:variables",
    ],
)

@ -155,9 +164,12 @@ py_library(
    srcs = ["wrappers/hooks.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":debug_utils",
        ":framework",
        ":local_cli_wrapper",
        ":stepper",
        "//tensorflow/python:session",
        "//tensorflow/python:training",
    ],
)

@ -214,6 +226,7 @@ py_test(
        ":debug_data",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
)

@ -227,8 +240,10 @@ py_test(
    deps = [
        ":debug_utils",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:session",
        "//tensorflow/python:variables",
    ],

@ -247,6 +262,7 @@ cuda_py_test(
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:session",
        "//tensorflow/python:training",
        "//tensorflow/python:variables",

@ -262,9 +278,13 @@ py_test(
        ":debug_data",
        ":framework",
        ":stepper",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:session",
        "//tensorflow/python:variables",
    ],

@ -283,6 +303,8 @@ py_test(
        ":tensor_format",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform",
        "//tensorflow/python:platform_test",
    ],
)

@ -294,9 +316,17 @@ py_library(
        ":debug_data",
        ":debug_utils",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:client_testlib",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:session",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:variables",
    ],
)

@ -305,8 +335,15 @@ cuda_py_test(
    size = "small",
    srcs = ["session_debug_file_test.py"],
    additional_deps = [
        ":debug_data",
        ":debug_utils",
        ":session_debug_testlib",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:session",
        "//tensorflow/python:variables",
    ],
)

@ -320,6 +357,8 @@ py_test(
    deps = [
        ":debugger_cli_common",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform",
        "//tensorflow/python:platform_test",
    ],
)

@ -333,6 +372,7 @@ py_test(
    deps = [
        ":command_parser",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
)

@ -346,6 +386,7 @@ py_test(
    deps = [
        ":tensor_format",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
    ],
)

@ -358,7 +399,12 @@ py_test(
    srcs_version = "PY2AND3",
    deps = [
        ":cli_shared",
        "//tensorflow/python:errors",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:variables",
    ],
)

@ -374,9 +420,15 @@ cuda_py_test(
        ":debug_utils",
        ":debugger_cli_common",
        "//tensorflow:tensorflow_py",
        "//tensorflow/python:client_testlib",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:session",
        "//tensorflow/python:variables",
    ],
)

@ -393,6 +445,7 @@ cuda_py_test(
        "//tensorflow/python:framework",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:session",
        "//tensorflow/python:training",

@ -414,6 +467,7 @@ py_test(
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:framework_test_lib",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform_test",
        "//tensorflow/python:session",
        "//tensorflow/python:state_ops",
        "//tensorflow/python:variables",

File diff suppressed because it is too large
@ -12,19 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

class ArgMaxTest(tf.test.TestCase):
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test

  def _testArg(self, method, x, dimension,
               expected_values, use_gpu=False, expected_err_re=None):

class ArgMaxTest(test.TestCase):

  def _testArg(self,
               method,
               x,
               dimension,
               expected_values,
               use_gpu=False,
               expected_err_re=None):
    with self.test_session(use_gpu=use_gpu):
      ans = method(x, dimension=dimension)
      if expected_err_re is None:

@ -35,28 +42,30 @@ class ArgMaxTest(tf.test.TestCase):
      with self.assertRaisesOpError(expected_err_re):
        ans.eval()

  def _testBothArg(self, method, x, dimension,
                   expected_values, expected_err_re=None):
    self._testArg(method, x, dimension,
                  expected_values, True, expected_err_re)
    self._testArg(method, x, dimension,
                  expected_values, False, expected_err_re)
  def _testBothArg(self,
                   method,
                   x,
                   dimension,
                   expected_values,
                   expected_err_re=None):
    self._testArg(method, x, dimension, expected_values, True, expected_err_re)
    self._testArg(method, x, dimension, expected_values, False, expected_err_re)

  def _testBasic(self, dtype):
    x = np.asarray(100*np.random.randn(200), dtype=dtype)
    x = np.asarray(100 * np.random.randn(200), dtype=dtype)

    # Check that argmin and argmax match numpy along the primary
    # dimension
    self._testBothArg(tf.argmax, x, 0, x.argmax())
    self._testBothArg(tf.argmin, x, 0, x.argmin())
    self._testBothArg(math_ops.argmax, x, 0, x.argmax())
    self._testBothArg(math_ops.argmin, x, 0, x.argmin())

  def _testDim(self, dtype):
    x = np.asarray(100*np.random.randn(3, 2, 4, 5, 6), dtype=dtype)
    x = np.asarray(100 * np.random.randn(3, 2, 4, 5, 6), dtype=dtype)

    # Check that argmin and argmax match numpy along all dimensions
    for dim in range(-5, 5):
      self._testBothArg(tf.argmax, x, dim, x.argmax(dim))
      self._testBothArg(tf.argmin, x, dim, x.argmin(dim))
      self._testBothArg(math_ops.argmax, x, dim, x.argmax(dim))
      self._testBothArg(math_ops.argmin, x, dim, x.argmin(dim))

  def testFloat(self):
    self._testBasic(np.float32)

@ -76,11 +85,11 @@ class ArgMaxTest(tf.test.TestCase):

  def testEmpty(self):
    with self.test_session():
      for op in tf.argmin, tf.argmax:
      for op in math_ops.argmin, math_ops.argmax:
        with self.assertRaisesOpError(
            r"Reduction axis 0 is empty in shape \[0\]"):
          op([], 0).eval()


if __name__ == "__main__":
  tf.test.main()
  test.main()
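The same mechanical rewrite runs through each test file in this section: the blanket "import tensorflow as tf" is dropped in favor of importing exactly the modules a test exercises, and each tf.* call site is rewritten against those modules. A minimal sketch of the before/after shape, using only modules visible in the hunks above:

    # Before: every symbol reached through the top-level package.
    import tensorflow as tf

    ans = tf.argmax(x, 0)

    # After: each op and the test runner imported from its defining module.
    from tensorflow.python.ops import math_ops
    from tensorflow.python.platform import test

    ans = math_ops.argmax(x, 0)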
@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for array_ops."""
from __future__ import absolute_import
from __future__ import division

@ -21,13 +20,20 @@ from __future__ import print_function
import time

import numpy as np
import tensorflow as tf

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test as test_lib


class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):

@ -36,7 +42,7 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
    matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
    with self.test_session():
      transposed = tf.matrix_transpose(matrix)
      transposed = array_ops.matrix_transpose(matrix)
      self.assertEqual((3, 2), transposed.get_shape())
      self.assertAllEqual(expected_transposed, transposed.eval())

@ -48,7 +54,7 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
    batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
    with self.test_session():
      transposed = tf.matrix_transpose(batch_matrix)
      transposed = array_ops.matrix_transpose(batch_matrix)
      self.assertEqual((2, 3, 2), transposed.get_shape())
      self.assertAllEqual(expected_transposed, transposed.eval())

@ -56,11 +62,10 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
    matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
    expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
    with self.test_session():
      matrix_ph = tf.placeholder(tf.int32)
      transposed = tf.matrix_transpose(matrix_ph)
      matrix_ph = array_ops.placeholder(dtypes.int32)
      transposed = array_ops.matrix_transpose(matrix_ph)
      self.assertAllEqual(
          expected_transposed,
          transposed.eval(feed_dict={matrix_ph: matrix}))
          expected_transposed, transposed.eval(feed_dict={matrix_ph: matrix}))

  def testBatchMatrixDynamicallyDefined(self):
    matrix_0 = [[1, 2, 3], [4, 5, 6]]

@ -70,8 +75,8 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
    batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
    expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
    with self.test_session():
      batch_matrix_ph = tf.placeholder(tf.int32)
      transposed = tf.matrix_transpose(batch_matrix_ph)
      batch_matrix_ph = array_ops.placeholder(dtypes.int32)
      transposed = array_ops.matrix_transpose(batch_matrix_ph)
      self.assertAllEqual(
          expected_transposed,
          transposed.eval(feed_dict={batch_matrix_ph: batch_matrix}))

@ -80,7 +85,7 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
    vector = [1, 2, 3]
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "should be a "):
        tf.matrix_transpose(vector)
        array_ops.matrix_transpose(vector)


class BooleanMaskTest(test_util.TensorFlowTestCase):

@ -93,7 +98,7 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
    if make_mask is None:
      make_mask = lambda shape: self.rng.randint(0, 2, size=shape).astype(bool)
    arr = np.random.rand(*arr_shape)
    mask = make_mask(arr_shape[: ndims_mask])
    mask = make_mask(arr_shape[:ndims_mask])
    masked_arr = arr[mask]
    with self.test_session():
      masked_tensor = array_ops.boolean_mask(arr, mask)

@ -128,7 +133,7 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
    mask = np.array([True, False])
    arr = np.array([[], []]).astype(np.float32)
    numpy_result = arr[mask]
    tf_result = tf.boolean_mask(arr, mask)
    tf_result = array_ops.boolean_mask(arr, mask)
    self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
    with self.test_session():
      self.assertAllClose(numpy_result, tf_result.eval())

@ -137,7 +142,7 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
    mask = np.array([]).astype(bool)
    arr = np.array([]).astype(np.float32)
    numpy_result = arr[mask]
    tf_result = tf.boolean_mask(arr, mask)
    tf_result = array_ops.boolean_mask(arr, mask)
    self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
    with self.test_session():
      self.assertAllClose(numpy_result, tf_result.eval())

@ -160,9 +165,9 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
      arr = np.array([[1, 2], [3, 4]])
      mask = np.array([False, True])

      masked_tensor = sess.run(
          array_ops.boolean_mask(ph_tensor, ph_mask),
          feed_dict={ph_tensor: arr, ph_mask: mask})
      masked_tensor = sess.run(array_ops.boolean_mask(ph_tensor, ph_mask),
                               feed_dict={ph_tensor: arr,
                                          ph_mask: mask})
      np.testing.assert_allclose(masked_tensor, arr[mask])

  def testMaskDimensionsSetToNoneRaises(self):

@ -262,13 +267,13 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
  def testInvalid(self):
    x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
    with self.test_session():
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "is out of valid range"):
        array_ops.reverse_v2(x_np, [-30]).eval()
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "is out of valid range"):
        array_ops.reverse_v2(x_np, [2]).eval()
      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "axis 0 specified more than once"):
        array_ops.reverse_v2(x_np, [0, -2]).eval()

@ -288,18 +293,18 @@ class ReverseV2Test(test_util.TensorFlowTestCase):

  def testUnknownDims(self):
    reverse_v2 = array_ops.reverse_v2
    data_t = tf.placeholder(tf.float32)
    axis_known_t = tf.placeholder(tf.int32, shape=[3])
    data_t = array_ops.placeholder(dtypes.float32)
    axis_known_t = array_ops.placeholder(dtypes.int32, shape=[3])
    reverse_known_t = reverse_v2(data_t, axis_known_t)
    # Unlike V1 we cannot know this anymore
    self.assertEqual(None, reverse_known_t.get_shape().ndims)

    axis_unknown_t = tf.placeholder(tf.int32)
    axis_unknown_t = array_ops.placeholder(dtypes.int32)
    reverse_unknown_t = reverse_v2(data_t, axis_unknown_t)
    self.assertIs(None, reverse_unknown_t.get_shape().ndims)

    data_2d_t = tf.placeholder(tf.float32, shape=[None, None])
    axis_2d_t = tf.placeholder(tf.int32, shape=[3])
    data_2d_t = array_ops.placeholder(dtypes.float32, shape=[None, None])
    axis_2d_t = array_ops.placeholder(dtypes.int32, shape=[3])
    reverse_2d_t = reverse_v2(data_2d_t, axis_2d_t)
    self.assertEqual(2, reverse_2d_t.get_shape().ndims)

@ -307,7 +312,7 @@
class MeshgridTest(test_util.TensorFlowTestCase):

  def _compareDiff(self, x, y, use_gpu):
    for index in ('ij', 'xy'):
    for index in ("ij", "xy"):
      numpy_out = np.meshgrid(x, y, indexing=index)
      tf_out = array_ops.meshgrid(x, y, indexing=index)
      with self.test_session(use_gpu=use_gpu):

@ -316,7 +321,7 @@ class MeshgridTest(test_util.TensorFlowTestCase):

  def _compareDiffType(self, n, np_dtype, use_gpu):
    inputs = []
    for index in ('ij', 'xy'):
    for index in ("ij", "xy"):
      for i in range(n):
        x = np.linspace(-10, 10, 5).astype(np_dtype)
        if np_dtype in (np.complex64, np.complex128):

@ -330,7 +335,7 @@ class MeshgridTest(test_util.TensorFlowTestCase):

  def testCompare(self):
    for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
              np.complex64, np.complex128):
      self._compareDiffType(2, t, False)
      self._compareDiffType(3, t, False)

@ -349,9 +354,11 @@ class StridedSliceChecker(object):
  REF_TENSOR = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
  REF_TENSOR_ALIGNED = np.arange(1, 97, dtype=np.float32).reshape(3, 4, 8)

  def __init__(self, test, x, tensor_type=tf.int32, check_type_infer=True):
  def __init__(self, test, x, tensor_type=dtypes.int32, check_type_infer=True):
    self.test = test
    self.x = tf.cast(tf.constant(x, dtype=tf.float32), dtype=tensor_type)
    self.x = math_ops.cast(
        constant_op.constant(
            x, dtype=dtypes.float32), dtype=tensor_type)
    self.x_np = np.array(x)
    self.check_type_infer = check_type_infer

@ -390,8 +397,10 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
  """Test the strided slice operation with variants of slices."""

  def test_basic_slice(self):
    for tensor_type in [tf.int32, tf.int64, tf.int16, tf.int8, tf.float32,
                        tf.float64]:
    for tensor_type in [
        dtypes.int32, dtypes.int64, dtypes.int16, dtypes.int8, dtypes.float32,
        dtypes.float64
    ]:
      for use_gpu in [False, True]:
        with self.test_session(use_gpu=use_gpu):
          checker = StridedSliceChecker(

@ -413,7 +422,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
          _ = checker[-2::-1, :, ::2]

          # Check rank-0 examples
          checker2 = StridedSliceChecker(self, 5, tensor_type=tf.int32)
          checker2 = StridedSliceChecker(self, 5, tensor_type=dtypes.int32)
          _ = checker2[None]
          _ = checker2[...]
          _ = checker2[tuple()]

@ -467,8 +476,8 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
    raw = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],
           [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]
    checker = StridedSliceChecker(self, raw, check_type_infer=False)
    bar = tf.constant(2)
    bar2 = tf.constant(3)
    bar = constant_op.constant(2)
    bar2 = constant_op.constant(3)
    _ = checker[..., bar:bar2]
    _ = checker[..., bar]
    with self.assertRaisesRegexp(

@ -499,7 +508,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
  def testExpandVariable(self):
    for use_gpu in False, True:
      with self.test_session(use_gpu=use_gpu):
        x = tf.Variable(7, dtype=tf.int32)
        x = variables.Variable(7, dtype=dtypes.int32)
        x.initializer.run()
        y = x[None].eval()
        self.assertEqual(y.shape, (1,))

@ -537,7 +546,7 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):

  def testUnknown(self):
    with self.test_session(use_gpu=False):
      uncertain_tensor = tf.placeholder(tf.float32)
      uncertain_tensor = array_ops.placeholder(dtypes.float32)
      a = StridedSliceShapeChecker(uncertain_tensor)
      a_slice_shape = a[...]
      self.assertAllEqual(a_slice_shape.ndims, None)

@ -549,7 +558,8 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
  def testTensorShapeUncertain(self):
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        uncertain_tensor = tf.placeholder(tf.float32, shape=(5, None, 7))
        uncertain_tensor = array_ops.placeholder(
            dtypes.float32, shape=(5, None, 7))
        a = StridedSliceShapeChecker(uncertain_tensor)
        self.tensorShapeEqual(a[3:5], tensor_shape.TensorShape([2, None, 7]))
        self.tensorShapeEqual(a[3:5, :, 4], tensor_shape.TensorShape([2, None]))

@ -559,29 +569,30 @@ class StridedSliceShapeTest(test_util.TensorFlowTestCase):
                              tensor_shape.TensorShape([2, None, 2]))
        self.tensorShapeEqual(a[3:5, :, 50:3],
                              tensor_shape.TensorShape([2, None, 0]))
        self.tensorShapeEqual(a[3:5, :, tf.newaxis, 50:3,],
        self.tensorShapeEqual(a[3:5, :, array_ops.newaxis, 50:3,],
                              tensor_shape.TensorShape([2, None, 1, 0]))
        self.tensorShapeEqual(a[1:5:2, :, tf.newaxis, 50:3,],
        self.tensorShapeEqual(a[1:5:2, :, array_ops.newaxis, 50:3,],
                              tensor_shape.TensorShape([2, None, 1, 0]))
        self.tensorShapeEqual(a[:5:3, :, tf.newaxis, 50:3,],
        self.tensorShapeEqual(a[:5:3, :, array_ops.newaxis, 50:3,],
                              tensor_shape.TensorShape([2, None, 1, 0]))
        self.tensorShapeEqual(a[:2:3, :, tf.newaxis, 50:3,],
        self.tensorShapeEqual(a[:2:3, :, array_ops.newaxis, 50:3,],
                              tensor_shape.TensorShape([1, None, 1, 0]))
        self.tensorShapeEqual(a[::-1, :, tf.newaxis, ::-2],
        self.tensorShapeEqual(a[::-1, :, array_ops.newaxis, ::-2],
                              tensor_shape.TensorShape([5, None, 1, 4]))

  def testTensorValuedIndexShape(self):
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        defined_shape_tensor = tf.placeholder(tf.float32, shape=(5, 3, 7))
        index_value = tf.placeholder(tf.int32, shape=())
        defined_shape_tensor = array_ops.placeholder(
            dtypes.float32, shape=(5, 3, 7))
        index_value = array_ops.placeholder(dtypes.int32, shape=())
        a = StridedSliceShapeChecker(defined_shape_tensor)
        self.tensorShapeEqual(a[index_value], tensor_shape.TensorShape([3, 7]))
        self.tensorShapeEqual(a[index_value, ::-1],
                              tensor_shape.TensorShape([3, 7]))
        self.tensorShapeEqual(a[index_value, ::-2],
                              tensor_shape.TensorShape([2, 7]))
        other_scalar = tf.placeholder(tf.int32, shape=())
        other_scalar = array_ops.placeholder(dtypes.int32, shape=())
        self.tensorShapeEqual(a[index_value, other_scalar:2],
                              tensor_shape.TensorShape([None, 7]))

@ -603,10 +614,13 @@ class GradSliceChecker(object):
    # compute analytic 2nd derivative
    analytic_grad2 = 2 * slice_val

    dy = tf.Variable(tf.ones(shape=slice_var.get_shape(), dtype=tf.int32))
    dy = variables.Variable(
        array_ops.ones(
            shape=slice_var.get_shape(), dtype=dtypes.int32))
    assign = dy.assign(slice_var)
    slice_val_grad, = tf.gradients(slice_val, self.var, grad_ys=dy)
    slice_val_grad2, = tf.gradients(slice_val_grad, dy, grad_ys=self.var)
    slice_val_grad, = gradients_impl.gradients(slice_val, self.var, grad_ys=dy)
    slice_val_grad2, = gradients_impl.gradients(
        slice_val_grad, dy, grad_ys=self.var)
    self.sess.run(assign)
    slice_val_grad_evaled, slice_val_grad2_evaled = (
        self.sess.run([slice_val_grad, slice_val_grad2]))

@ -627,15 +641,17 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
  def testGradient(self):
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu) as sess:
        var = tf.Variable(tf.reshape(tf.range(1, 97, 1), shape=(6, 4, 4)))
        init = tf.global_variables_initializer()
        var = variables.Variable(
            array_ops.reshape(
                math_ops.range(1, 97, 1), shape=(6, 4, 4)))
        init = variables.global_variables_initializer()
        sess.run(init)

        grad = GradSliceChecker(self, sess, var,
                                np.array(range(1, 97, 1)).reshape((6, 4, 4)))
        _ = grad[2:6:2, 1:3, 1:3]
        _ = grad[3:0:-2, 1:3, 1:3]
        _ = grad[3:0:-2, tf.newaxis, 1:3, 2, tf.newaxis]
        _ = grad[3:0:-2, array_ops.newaxis, 1:3, 2, array_ops.newaxis]
        _ = grad[3:0:-2, 1:3, 2]
        _ = grad[:, -1, :]
        _ = grad[:, -2, :]

@ -647,11 +663,10 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
  def testGradientZero(self):
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu) as sess:
        var = tf.Variable(8)
        init = tf.global_variables_initializer()
        var = variables.Variable(8)
        init = variables.global_variables_initializer()
        sess.run(init)
        grad = GradSliceChecker(self, sess, var,
                                np.array(8))
        grad = GradSliceChecker(self, sess, var, np.array(8))
        _ = grad[tuple()]


@ -660,39 +675,42 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):

  def testHostVsDevice(self):
    with self.test_session(use_gpu=True) as sess:
      var2 = tf.Variable(
          tf.reshape(
              tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1)))
      varshape = tf.Variable([6, 4, 4], dtype=tf.int32)
      sess.run(tf.global_variables_initializer())
      begin = tf.constant([0, 0, 0])
      end = tf.constant([4, 1, 1])
      strides = tf.constant([1, 1, 1])
      var2 = variables.Variable(
          array_ops.reshape(
              math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
              shape=(4, 1, 1)))
      varshape = variables.Variable([6, 4, 4], dtype=dtypes.int32)
      sess.run(variables.global_variables_initializer())
      begin = constant_op.constant([0, 0, 0])
      end = constant_op.constant([4, 1, 1])
      strides = constant_op.constant([1, 1, 1])
      foo = array_ops.strided_slice_grad(varshape, begin, end, strides, var2)
      sess.run(foo)

  def testInt64Shape(self):
    with self.test_session(use_gpu=True) as sess:
      original_dy = tf.reshape(
          tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1))
      original_shape = tf.constant([6, 4, 4], dtype=tf.int64)
      sess.run(tf.global_variables_initializer())
      begin = tf.constant([0, 0, 0], dtype=tf.int64)
      end = tf.constant([4, 1, 1], dtype=tf.int64)
      strides = tf.constant([1, 1, 1], dtype=tf.int64)
      original_dy = array_ops.reshape(
          math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
          shape=(4, 1, 1))
      original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
      sess.run(variables.global_variables_initializer())
      begin = constant_op.constant([0, 0, 0], dtype=dtypes.int64)
      end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
      strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
      dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
                                        original_dy)
      sess.run(dx)

  def testMixedIndexTypes(self):
    with self.test_session(use_gpu=True) as sess:
      original_dy = tf.reshape(
          tf.cast(tf.range(1, 5, 1), tf.float32), shape=(4, 1, 1))
      original_shape = tf.constant([6, 4, 4], dtype=tf.int64)
      sess.run(tf.global_variables_initializer())
      begin = tf.constant([0, 0, 0], dtype=tf.int32)
      end = tf.constant([4, 1, 1], dtype=tf.int64)
      strides = tf.constant([1, 1, 1], dtype=tf.int64)
      original_dy = array_ops.reshape(
          math_ops.cast(math_ops.range(1, 5, 1), dtypes.float32),
          shape=(4, 1, 1))
      original_shape = constant_op.constant([6, 4, 4], dtype=dtypes.int64)
      sess.run(variables.global_variables_initializer())
      begin = constant_op.constant([0, 0, 0], dtype=dtypes.int32)
      end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
      strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
      with self.assertRaisesRegexp(
          TypeError, "Input 'begin' of 'StridedSliceGrad' Op has type int32"
          " that does not match type int64 of argument 'shape'"):

@ -710,11 +728,11 @@ class BenchmarkSlice(object):
    return self.tensor[x]


class StridedSliceBenchmark(tf.test.Benchmark):
class StridedSliceBenchmark(test_lib.Benchmark):
  """Benchmark new strided slice operation on non-trivial case."""

  def run_and_time(self, slice_op):
    tf.global_variables_initializer().run()
    variables.global_variables_initializer().run()
    for _ in range(10):
      _ = slice_op.eval()
    iters = 1000

@ -728,28 +746,27 @@ class StridedSliceBenchmark(tf.test.Benchmark):
    n = 256
    shape = (n, n, n)
    items = n**3
    var = tf.Variable(
        tf.reshape(
            tf.linspace(1., float(items), items), shape),
        dtype=tf.float32)
    var = variables.Variable(
        array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
        dtype=dtypes.float32)
    return var

  def benchmark_strided_slice_skip(self):
    with tf.Session():
    with session.Session():
      var = self.make_variable()
      helper = BenchmarkSlice(var)
      slice_op = helper[::2, ::1, ::2]
      self.run_and_time(slice_op)

  def benchmark_strided_slice_easy(self):
    with tf.Session():
    with session.Session():
      var = self.make_variable()
      helper = BenchmarkSlice(var)
      slice_op = helper[3::1, 3::1, 3::1]
      self.run_and_time(slice_op)

  def benchmark_slice_easy(self):
    with tf.Session():
    with session.Session():
      var = self.make_variable()
      slice_op = var[3::1, 3::1, 3::1]
      self.run_and_time(slice_op)

@ -757,19 +774,21 @@ class StridedSliceBenchmark(tf.test.Benchmark):

class StridedSliceAssignChecker(object):

  def __init__(self, test, x, tensor_type=tf.float32):
  def __init__(self, test, x, tensor_type=dtypes.float32):
    self.tensor_type = tensor_type
    self.test = test
    self.x = tf.cast(tf.constant(x, dtype=tf.float32), dtype=tensor_type)
    self.x = math_ops.cast(
        constant_op.constant(
            x, dtype=dtypes.float32), dtype=tensor_type)
    self.x_np = np.array(x)

  def __setitem__(self, index, value):
    for use_gpu in [False, True]:
      with self.test.test_session(use_gpu=use_gpu) as sess:
        var = tf.Variable(self.x)
        sess.run(tf.initialize_variables([var]))
        var = variables.Variable(self.x)
        sess.run(variables.initialize_variables([var]))
        val = sess.run(var[index].assign(
            tf.constant(
            constant_op.constant(
                value, dtype=self.tensor_type)))
        valnp = np.copy(self.x_np)
        valnp[index] = np.array(value)

@ -780,10 +799,10 @@ class SliceAssignTest(test_util.TensorFlowTestCase):

  def testInvalidSlice(self):
    with self.test_session() as sess:
      foo = tf.constant([1, 2, 3])
      foo = constant_op.constant([1, 2, 3])
      with self.assertRaisesRegexp(ValueError, "Sliced assignment"
                                   " is only supported for variables"):
        bar = foo[:2].assign(tf.constant([1, 2]))
        bar = foo[:2].assign(constant_op.constant([1, 2]))
        sess.run(bar)

  def testSliceAssign(self):

@ -816,7 +835,7 @@ class SliceAssignTest(test_util.TensorFlowTestCase):
        errors.FailedPreconditionError,
        "Attempting to use uninitialized value Variable"):
      with self.test_session() as sess:
        v = tf.Variable([1, 2])
        v = variables.Variable([1, 2])
        sess.run(v[:].assign([1, 2]))


@ -825,29 +844,27 @@ class ShapeSizeRankTest(test_util.TensorFlowTestCase):
  def testDenseShape(self):
    with self.test_session():
      t_value = [[0, 42], [24, 0]]
      self.assertAllEqual((2, 2), tf.shape(t_value).eval())
      self.assertEqual(4, tf.size(t_value).eval())
      self.assertEqual(2, tf.rank(t_value).eval())
      self.assertAllEqual((2, 2), array_ops.shape(t_value).eval())
      self.assertEqual(4, array_ops.size(t_value).eval())
      self.assertEqual(2, array_ops.rank(t_value).eval())

      t = tf.constant(t_value)
      self.assertAllEqual((2, 2), tf.shape(t).eval())
      self.assertEqual(4, tf.size(t).eval())
      self.assertEqual(2, tf.rank(t).eval())
      t = constant_op.constant(t_value)
      self.assertAllEqual((2, 2), array_ops.shape(t).eval())
      self.assertEqual(4, array_ops.size(t).eval())
      self.assertEqual(2, array_ops.rank(t).eval())

  def testSparseShape(self):
    with self.test_session():
      sp_value = tf.SparseTensorValue(
          indices=((0, 1), (1, 0)),
          values=(42, 24),
          dense_shape=(2, 2))
      self.assertAllEqual((2, 2), tf.shape(sp_value).eval())
      self.assertEqual(4, tf.size(sp_value).eval())
      self.assertEqual(2, tf.rank(sp_value).eval())
      sp_value = sparse_tensor.SparseTensorValue(
          indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
      self.assertAllEqual((2, 2), array_ops.shape(sp_value).eval())
      self.assertEqual(4, array_ops.size(sp_value).eval())
      self.assertEqual(2, array_ops.rank(sp_value).eval())

      sp = tf.SparseTensor.from_value(sp_value)
      self.assertAllEqual((2, 2), tf.shape(sp).eval())
      self.assertEqual(4, tf.size(sp).eval())
      self.assertEqual(2, tf.rank(sp).eval())
      sp = sparse_tensor.SparseTensor.from_value(sp_value)
      self.assertAllEqual((2, 2), array_ops.shape(sp).eval())
      self.assertEqual(4, array_ops.size(sp).eval())
      self.assertEqual(2, array_ops.rank(sp).eval())


class SequenceMaskTest(test_util.TensorFlowTestCase):

@ -855,40 +872,45 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
  def testExceptions(self):
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "lengths must be 1D"):
        tf.sequence_mask([[10, 20]], [10, 20])
        array_ops.sequence_mask([[10, 20]], [10, 20])
      with self.assertRaisesRegexp(ValueError, "maxlen must be scalar"):
        tf.sequence_mask([10, 20], [10, 20])
        array_ops.sequence_mask([10, 20], [10, 20])

  def testNormal(self):
    with self.test_session():
      res = tf.sequence_mask(tf.constant([1, 3, 2]), 5)
      res = array_ops.sequence_mask(constant_op.constant([1, 3, 2]), 5)
      self.assertAllEqual(res.get_shape(), [3, 5])
      self.assertAllEqual(res.eval(), [[True, False, False, False, False],
                                       [True, True, True, False, False],
                                       [True, True, False, False, False]])

      # test dtype and default maxlen:
      res = tf.sequence_mask(tf.constant([0, 1, 4]), dtype=tf.float32)
      res = array_ops.sequence_mask(
          constant_op.constant([0, 1, 4]), dtype=dtypes.float32)
      self.assertAllEqual(res.get_shape().as_list(), [3, None])
      self.assertAllEqual(res.eval(), [[0.0, 0.0, 0.0, 0.0],
                                       [1.0, 0.0, 0.0, 0.0],
                                       [1.0, 1.0, 1.0, 1.0]])

  def testDtypes(self):

    def check_dtypes(lengths_dtype, maxlen_dtype):
      res = tf.sequence_mask(tf.constant([1, 3, 2], dtype=lengths_dtype),
                             tf.constant(5, dtype=maxlen_dtype))
      res = array_ops.sequence_mask(
          constant_op.constant(
              [1, 3, 2], dtype=lengths_dtype),
          constant_op.constant(
              5, dtype=maxlen_dtype))
      self.assertAllEqual(res.get_shape(), [3, 5])
      self.assertAllEqual(res.eval(), [[True, False, False, False, False],
                                       [True, True, True, False, False],
                                       [True, True, False, False, False]])

    with self.test_session():
      check_dtypes(tf.int32, tf.int32)
      check_dtypes(tf.int32, tf.int64)
      check_dtypes(tf.int64, tf.int32)
      check_dtypes(tf.int64, tf.int64)
      check_dtypes(dtypes.int32, dtypes.int32)
      check_dtypes(dtypes.int32, dtypes.int64)
      check_dtypes(dtypes.int64, dtypes.int32)
      check_dtypes(dtypes.int64, dtypes.int64)


if __name__ == "__main__":
  tf.test.main()
  test_lib.main()
@ -12,69 +12,70 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for as_string_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test


class AsStringOpTest(tf.test.TestCase):
class AsStringOpTest(test.TestCase):

  def testFloat(self):
    float_inputs_ = [0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"),
                     float("-INF")]
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, float("INF"), float("NAN"), float("-INF")
    ]

    with self.test_session():
      for dtype in (tf.float32, tf.float64):
        input_ = tf.placeholder(dtype)
      for dtype in (dtypes.float32, dtypes.float64):
        input_ = array_ops.placeholder(dtype)

        output = tf.as_string(input_, shortest=True)
        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        s = lambda strs: [x.decode("ascii") for x in strs]
        self.assertAllEqual(s(result), ["%g" % x for x in float_inputs_])

        output = tf.as_string(input_, scientific=True)
        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%e" % x for x in float_inputs_])

        output = tf.as_string(input_)
        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%f" % x for x in float_inputs_])

        output = tf.as_string(input_, width=3)
        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%3f" % x for x in float_inputs_])

        output = tf.as_string(input_, width=3, fill="0")
        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03f" % x for x in float_inputs_])

        output = tf.as_string(input_, width=3, fill="0", shortest=True)
        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03g" % x for x in float_inputs_])

        output = tf.as_string(input_, precision=10, width=3)
        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10f" % x for x in float_inputs_])

        output = tf.as_string(input_,
                              precision=10,
                              width=3,
                              fill="0",
                              shortest=True)
        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: float_inputs_})
        self.assertAllEqual(s(result), ["%03.10g" % x for x in float_inputs_])

      with self.assertRaisesOpError("Cannot select both"):
        output = tf.as_string(input_, scientific=True, shortest=True)
        output = string_ops.as_string(input_, scientific=True, shortest=True)
        output.eval(feed_dict={input_: float_inputs_})

      with self.assertRaisesOpError("Fill string must be one or fewer"):
        output = tf.as_string(input_, fill="ab")
        output = string_ops.as_string(input_, fill="ab")
        output.eval(feed_dict={input_: float_inputs_})

  def testInt(self):

@ -84,31 +85,31 @@ class AsStringOpTest(tf.test.TestCase):
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (tf.int32, tf.int64, tf.int8):
        input_ = tf.placeholder(dtype)
      for dtype in (dtypes.int32, dtypes.int64, dtypes.int8):
        input_ = array_ops.placeholder(dtype)

        output = tf.as_string(input_)
        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

        output = tf.as_string(input_, width=3)
        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%3d" % x for x in int_inputs_])

        output = tf.as_string(input_, width=3, fill="0")
        output = string_ops.as_string(input_, width=3, fill="0")
        result = output.eval(feed_dict={input_: int_inputs_})
        self.assertAllEqual(s(result), ["%03d" % x for x in int_inputs_])

      with self.assertRaisesOpError("scientific and shortest"):
        output = tf.as_string(input_, scientific=True)
        output = string_ops.as_string(input_, scientific=True)
        output.eval(feed_dict={input_: int_inputs_})

      with self.assertRaisesOpError("scientific and shortest"):
        output = tf.as_string(input_, shortest=True)
        output = string_ops.as_string(input_, shortest=True)
        output.eval(feed_dict={input_: int_inputs_})

      with self.assertRaisesOpError("precision not supported"):
        output = tf.as_string(input_, precision=0)
        output = string_ops.as_string(input_, precision=0)
        output.eval(feed_dict={input_: int_inputs_})

  def testLargeInt(self):

@ -117,15 +118,15 @@ class AsStringOpTest(tf.test.TestCase):
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      input_ = tf.placeholder(tf.int32)
      input_ = array_ops.placeholder(dtypes.int32)
      int_inputs_ = [np.iinfo(np.int32).min, np.iinfo(np.int32).max]
      output = tf.as_string(input_)
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

      input_ = tf.placeholder(tf.int64)
      input_ = array_ops.placeholder(dtypes.int64)
      int_inputs_ = [np.iinfo(np.int64).min, np.iinfo(np.int64).max]
      output = tf.as_string(input_)
      output = string_ops.as_string(input_)
      result = output.eval(feed_dict={input_: int_inputs_})
      self.assertAllEqual(s(result), ["%d" % x for x in int_inputs_])

@ -134,75 +135,74 @@ class AsStringOpTest(tf.test.TestCase):
    s = lambda strs: [x.decode("ascii") for x in strs]

    with self.test_session():
      for dtype in (tf.bool,):
        input_ = tf.placeholder(dtype)
      for dtype in (dtypes.bool,):
        input_ = array_ops.placeholder(dtype)

        output = tf.as_string(input_)
        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: bool_inputs_})
        self.assertAllEqual(s(result), ["false", "true"])

  def testComplex(self):
    float_inputs_ = [0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
                     complex("-INF")]
    float_inputs_ = [
        0, 1, -1, 0.5, 0.25, 0.125, complex("INF"), complex("NAN"),
        complex("-INF")
    ]
    complex_inputs_ = [(x + (x + 1) * 1j) for x in float_inputs_]

    with self.test_session():
      for dtype in (tf.complex64,):
        input_ = tf.placeholder(dtype)
      for dtype in (dtypes.complex64,):
        input_ = array_ops.placeholder(dtype)

        def clean_nans(s_l):
          return [s.decode("ascii").replace("-nan", "nan") for s in s_l]

        output = tf.as_string(input_, shortest=True)
        output = string_ops.as_string(input_, shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(clean_nans(result),
                            ["(%g,%g)" % (x.real, x.imag)
                             for x in complex_inputs_])
        self.assertAllEqual(
            clean_nans(result),
            ["(%g,%g)" % (x.real, x.imag) for x in complex_inputs_])

        output = tf.as_string(input_, scientific=True)
        output = string_ops.as_string(input_, scientific=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(clean_nans(result),
                            ["(%e,%e)" % (x.real, x.imag)
                             for x in complex_inputs_])
        self.assertAllEqual(
            clean_nans(result),
            ["(%e,%e)" % (x.real, x.imag) for x in complex_inputs_])

        output = tf.as_string(input_)
        output = string_ops.as_string(input_)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(clean_nans(result),
                            ["(%f,%f)" % (x.real, x.imag)
                             for x in complex_inputs_])
        self.assertAllEqual(
            clean_nans(result),
            ["(%f,%f)" % (x.real, x.imag) for x in complex_inputs_])

        output = tf.as_string(input_, width=3)
        output = string_ops.as_string(input_, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(clean_nans(result),
                            ["(%03f,%03f)" % (x.real, x.imag)
                             for x in complex_inputs_])
        self.assertAllEqual(
            clean_nans(result),
            ["(%03f,%03f)" % (x.real, x.imag) for x in complex_inputs_])

        output = tf.as_string(input_, width=3, fill="0", shortest=True)
        output = string_ops.as_string(input_, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(clean_nans(result),
                            ["(%03g,%03g)" % (x.real, x.imag)
                             for x in complex_inputs_])
        self.assertAllEqual(
            clean_nans(result),
            ["(%03g,%03g)" % (x.real, x.imag) for x in complex_inputs_])

        output = tf.as_string(input_, precision=10, width=3)
        output = string_ops.as_string(input_, precision=10, width=3)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(clean_nans(result),
                            ["(%03.10f,%03.10f)" % (x.real, x.imag)
                             for x in complex_inputs_])
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10f,%03.10f)" % (x.real, x.imag) for x in complex_inputs_])

        output = tf.as_string(input_,
                              precision=10,
                              width=3,
                              fill="0",
                              shortest=True)
        output = string_ops.as_string(
            input_, precision=10, width=3, fill="0", shortest=True)
        result = output.eval(feed_dict={input_: complex_inputs_})
        self.assertAllEqual(clean_nans(result),
                            ["(%03.10g,%03.10g)" % (x.real, x.imag)
                             for x in complex_inputs_])
        self.assertAllEqual(
            clean_nans(result),
            ["(%03.10g,%03.10g)" % (x.real, x.imag) for x in complex_inputs_])

        with self.assertRaisesOpError("Cannot select both"):
          output = tf.as_string(input_, scientific=True, shortest=True)
          output = string_ops.as_string(input_, scientific=True, shortest=True)
          output.eval(feed_dict={input_: complex_inputs_})


if __name__ == "__main__":
  tf.test.main()
  test.main()
@ -12,14 +12,22 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ==============================================================================
|
||||
|
||||
"""Tests for convolution related functionality in tensorflow.ops.nn."""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
|
||||
from tensorflow.python.framework import constant_op
|
||||
from tensorflow.python.framework import dtypes
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import gradient_checker
|
||||
from tensorflow.python.ops import nn_impl
|
||||
from tensorflow.python.ops import nn_ops
|
||||
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
|
||||
from tensorflow.python.platform import test
|
||||
|
||||
|
||||
def _upsample_filters(filters, rate):
|
||||
@ -42,13 +50,13 @@ def _upsample_filters(filters, rate):
|
||||
filters_up = np.transpose(filters, [2, 3, 0, 1])
|
||||
ker = np.zeros([rate, rate], dtype=np.float32)
|
||||
ker[0, 0] = 1
|
||||
filters_up = np.kron(filters_up, ker)[:, :, :-(rate-1), :-(rate-1)]
|
||||
filters_up = np.kron(filters_up, ker)[:, :, :-(rate - 1), :-(rate - 1)]
|
||||
# [in_depth, out_depth, h_up, w_up] -> [h_up, w_up, in_depth, out_depth]
|
||||
filters_up = np.transpose(filters_up, [2, 3, 0, 1])
|
||||
return filters_up
|
||||
|
||||
|
||||
class AtrousConv2DTest(tf.test.TestCase):
|
||||
class AtrousConv2DTest(test.TestCase):
|
||||
|
||||
def testAtrousConv2DForward(self):
|
||||
with self.test_session(use_gpu=True):
|
||||
@ -68,9 +76,9 @@ class AtrousConv2DTest(tf.test.TestCase):
|
||||
f_up = _upsample_filters(f, rate)
|
||||
|
||||
for padding in ["SAME", "VALID"]:
|
||||
y1 = tf.nn.atrous_conv2d(x, f, rate, padding=padding)
|
||||
y2 = tf.nn.conv2d(x, f_up, strides=[1, 1, 1, 1],
|
||||
padding=padding)
|
||||
y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
|
||||
y2 = nn_ops.conv2d(
|
||||
x, f_up, strides=[1, 1, 1, 1], padding=padding)
|
||||
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
|
||||
|
||||
def testAtrousSequence(self):
|
||||
@ -111,18 +119,18 @@ class AtrousConv2DTest(tf.test.TestCase):
|
||||
|
||||
for rate in range(2, 4):
|
||||
# y1: three atrous_conv2d in a row.
|
||||
y1 = tf.nn.atrous_conv2d(x, f, rate, padding=padding)
|
||||
y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
|
||||
y1 = tf.nn.atrous_conv2d(y1, f, rate, padding=padding)
|
||||
y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
|
||||
y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
|
||||
y1 = nn_ops.atrous_conv2d(y1, f, rate, padding=padding)
|
||||
# y2: space_to_batch, three conv2d in a row, batch_to_space
|
||||
pad_bottom = 0 if height % rate == 0 else rate - height % rate
|
||||
pad_right = 0 if width % rate == 0 else rate - width % rate
|
||||
pad = [[0, pad_bottom], [0, pad_right]]
|
||||
y2 = tf.space_to_batch(x, paddings=pad, block_size=rate)
|
||||
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
|
||||
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
|
||||
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = tf.batch_to_space(y2, crops=pad, block_size=rate)
y2 = array_ops.space_to_batch(x, paddings=pad, block_size=rate)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)

def testGradient(self):
@@ -137,19 +145,20 @@ class AtrousConv2DTest(tf.test.TestCase):
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = tf.constant(x_val, name="x", dtype=tf.float32)
f = tf.constant(f_val, name="f", dtype=tf.float32)
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)

for rate in range(1, 4):
output = tf.nn.atrous_conv2d(x, f, rate=rate, padding="SAME")
err = tf.test.compute_gradient_error(
[x, f], [x_shape, f_shape], output, y_shape)
output = nn_ops.atrous_conv2d(x, f, rate=rate, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f],
[x_shape, f_shape],
output, y_shape)
print("atrous_conv2d gradient err = %g " % err)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)


class AtrousConv2DTransposeTest(tf.test.TestCase):
class AtrousConv2DTransposeTest(test.TestCase):

def testAtrousConv2DTransposeForward(self):
with self.test_session(use_gpu=True):
@@ -167,26 +176,27 @@ class AtrousConv2DTransposeTest(tf.test.TestCase):

for rate in range(1, 4):
f_up = _upsample_filters(f, rate)
kernel_height_up = (kernel_height +
(kernel_height - 1) * (rate - 1))
kernel_height_up = (kernel_height + (kernel_height - 1) *
(rate - 1))
kernel_width_up = kernel_width + (kernel_width - 1) * (rate - 1)

for padding in ["SAME", "VALID"]:
if padding == "SAME":
y_shape = [2, height, width, 2]
else:
y_shape = [2,
height + kernel_height_up - 1,
width + kernel_width_up - 1,
2]
y_shape = [
2, height + kernel_height_up - 1,
width + kernel_width_up - 1, 2
]

y1 = tf.nn.atrous_conv2d_transpose(x, f, y_shape, rate, padding)
y2 = tf.nn.conv2d_transpose(
y1 = nn_ops.atrous_conv2d_transpose(x, f, y_shape, rate,
padding)
y2 = nn_ops.conv2d_transpose(
x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)


class AtrousDepthwiseConv2DTest(tf.test.TestCase):
class AtrousDepthwiseConv2DTest(test.TestCase):

def testAtrousDepthwiseConv2DForward(self):
strides = [1, 1, 1, 1]
@@ -207,11 +217,11 @@ class AtrousDepthwiseConv2DTest(tf.test.TestCase):
f_up = _upsample_filters(f, rate)

for padding in ["SAME", "VALID"]:
y1 = tf.nn.depthwise_conv2d(x, f, strides, padding,
rate=[rate, rate])
y2 = tf.nn.depthwise_conv2d(x, f_up, strides, padding)
y1 = nn_impl.depthwise_conv2d(
x, f, strides, padding, rate=[rate, rate])
y2 = nn_impl.depthwise_conv2d(x, f_up, strides, padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)


if __name__ == "__main__":
tf.test.main()
test.main()
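The forward test above leans on the standard identity behind atrous convolution: with "VALID" padding, atrous_conv2d equals space_to_batch followed by an ordinary conv2d and batch_to_space. A minimal standalone sketch of that identity, assuming the TF 1.x-era tf.* API that the old code used and spatial dims already divisible by the rate:

import numpy as np
import tensorflow as tf

rate = 2
x = tf.constant(np.random.rand(1, 8, 8, 1).astype(np.float32))
f = tf.constant(np.random.rand(3, 3, 1, 1).astype(np.float32))
pad = [[0, 0], [0, 0]]  # no extra padding needed: 8 is a multiple of rate
y1 = tf.nn.atrous_conv2d(x, f, rate=rate, padding="VALID")
y2 = tf.space_to_batch(x, paddings=pad, block_size=rate)
y2 = tf.nn.conv2d(y2, f, strides=[1, 1, 1, 1], padding="VALID")
y2 = tf.batch_to_space(y2, crops=pad, block_size=rate)
with tf.Session() as sess:
    v1, v2 = sess.run([y1, y2])
print(np.allclose(v1, v2, atol=1e-4))  # expected: True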
@@ -13,12 +13,19 @@
# limitations under the License.
# ==============================================================================
"""Tests for atrous convolution functionality in tensorflow.ops.nn."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test


def upsample_filters(filters, rate):
@@ -40,13 +47,13 @@ def upsample_filters(filters, rate):
num_spatial_dims = len(rate)
spatial_shape = np.array(filters.shape[:num_spatial_dims])
output_spatial_shape = (spatial_shape - 1) * rate + 1
output = np.zeros(tuple(output_spatial_shape) + tuple(filters.shape[-2:]),
filters.dtype)
output = np.zeros(
tuple(output_spatial_shape) + tuple(filters.shape[-2:]), filters.dtype)
output[tuple(np.s_[::rate[i]] for i in range(num_spatial_dims))] = filters
return output


class AtrousConvolutionTest(tf.test.TestCase):
class AtrousConvolutionTest(test.TestCase):

def _test_atrous_convolution(self, input_shape, filter_shape, dilation_rate,
**kwargs):
@@ -54,9 +61,9 @@ class AtrousConvolutionTest(tf.test.TestCase):
np.prod(filter_shape), dtype=np.float32).reshape(filter_shape)
filters_upsampled = upsample_filters(filters, dilation_rate)
x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
y1 = tf.nn.convolution(
y1 = nn_ops.convolution(
input=x, filter=filters, dilation_rate=dilation_rate, **kwargs)
y2 = tf.nn.convolution(input=x, filter=filters_upsampled, **kwargs)
y2 = nn_ops.convolution(input=x, filter=filters_upsampled, **kwargs)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)

def testAtrousConvolution2D(self):
@@ -78,12 +85,12 @@ class AtrousConvolutionTest(tf.test.TestCase):
for kernel_depth, kernel_height, kernel_width in [[3, 3, 3],
[3, 2, 2],
[2, 1, 3]]:
for dilation_rate in [[1, 1, 1], [3, 3, 3], [3, 2, 3],
[3, 1, 2]]:
for dilation_rate in [[1, 1, 1], [3, 3, 3], [3, 2, 3], [3, 1, 2]]:
self._test_atrous_convolution(
input_shape=[2, depth, height, width, 2],
filter_shape=[kernel_depth, kernel_height, kernel_width,
2, 2],
filter_shape=[
kernel_depth, kernel_height, kernel_width, 2, 2
],
padding=padding,
dilation_rate=dilation_rate)

@@ -100,7 +107,7 @@ class AtrousConvolutionTest(tf.test.TestCase):
dilation_rate=[rate])

def testAtrousConvolutionNC(self):
if tf.test.is_gpu_available(cuda_only=True):
if test.is_gpu_available(cuda_only=True):
# "NCW" and "NCHW" formats are currently supported only on CUDA.
with self.test_session(use_gpu=True):
for padding in ["SAME", "VALID"]:
@@ -136,26 +143,28 @@ class AtrousConvolutionTest(tf.test.TestCase):
f2 = 1e-2 * np.random.random_sample(f_shape).astype(np.float32)

def combined_op(converted_input, num_spatial_dims, padding_arg): # pylint: disable=unused-argument
result = tf.nn.convolution(
input=converted_input, filter=f1, padding=padding) # pylint: disable=cell-var-from-loop
result = tf.nn.convolution(
input=result, filter=f2, padding=padding) # pylint: disable=cell-var-from-loop
result = nn_ops.convolution(
input=converted_input, filter=f1,
padding=padding) # pylint: disable=cell-var-from-loop
result = nn_ops.convolution(
input=result, filter=f2,
padding=padding) # pylint: disable=cell-var-from-loop
return result

for rate_height in range(2, 4):
for rate_width in range(2, 4):
dilation_rate = [rate_height, rate_width]
y1 = tf.nn.convolution(
y1 = nn_ops.convolution(
input=x,
filter=f1,
padding=padding,
dilation_rate=dilation_rate)
y1 = tf.nn.convolution(
y1 = nn_ops.convolution(
input=y1,
filter=f2,
padding=padding,
dilation_rate=dilation_rate)
y2 = tf.nn.with_space_to_batch(
y2 = nn_ops.with_space_to_batch(
input=x,
dilation_rate=dilation_rate,
op=combined_op,
@@ -166,13 +175,13 @@ class AtrousConvolutionTest(tf.test.TestCase):
def _test_gradient(self, x_shape, f_shape, dilation_rate, padding):
x_val = np.random.random_sample(x_shape).astype(np.float32)
f_val = np.random.random_sample(f_shape).astype(np.float32)
x = tf.constant(x_val, name="x", dtype=tf.float32)
f = tf.constant(f_val, name="f", dtype=tf.float32)
output = tf.nn.convolution(
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.convolution(
input=x, filter=f, dilation_rate=dilation_rate, padding=padding)
y_shape = output.get_shape().as_list()
err = tf.test.compute_gradient_error([x, f], [x_shape, f_shape], output,
y_shape)
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)

@@ -189,4 +198,4 @@ class AtrousConvolutionTest(tf.test.TestCase):


if __name__ == "__main__":
tf.test.main()
test.main()
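For reference, a numpy-only sketch of what the upsample_filters helper above computes in the 1-D case: (rate - 1) zeros are inserted between filter taps, so a regular convolution with the upsampled filter matches a dilated convolution with the original one. This is an illustration, not part of the patch:

import numpy as np

f = np.array([1.0, 2.0, 3.0])
rate = 2
up = np.zeros((len(f) - 1) * rate + 1, f.dtype)
up[::rate] = f
print(up)  # [1. 0. 2. 0. 3.]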
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for image.extract_glimpse()."""

from __future__ import absolute_import
@@ -20,14 +19,17 @@ from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test


class ExtractGlimpseTest(tf.test.TestCase):
class ExtractGlimpseTest(test.TestCase):

def _VerifyValues(
self, tensor_in_sizes, glimpse_sizes, offsets, expected_rows,
expected_cols):
def _VerifyValues(self, tensor_in_sizes, glimpse_sizes, offsets,
expected_rows, expected_cols):
"""Verifies the output values of the glimpse extraction kernel.

Args:
@@ -49,14 +51,13 @@ class ExtractGlimpseTest(tf.test.TestCase):
# [ 3 3 3 ... ]
# [ ...
# ]
t_rows = tf.tile(
[[1.0 * r] for r in range(1, rows + 1)], [1, cols],
name='tile_rows')
t_rows = array_ops.tile(
[[1.0 * r] for r in range(1, rows + 1)], [1, cols], name='tile_rows')

# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_rows_4d = tf.transpose(
tf.expand_dims(
tf.expand_dims(t_rows, 0), 3), [0, 2, 1, 3])
t_rows_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_rows, 0), 3),
[0, 2, 1, 3])

# Column Tensor with entries by column.
# [[ 1 2 3 4 ... ]
@@ -64,24 +65,23 @@ class ExtractGlimpseTest(tf.test.TestCase):
# [ 1 2 3 4 ... ]
# [ ... ]
# ]
t_cols = tf.tile(
[[1.0 * r for r in range(1, cols + 1)]],
[rows, 1], name='tile_cols')
t_cols = array_ops.tile(
[[1.0 * r for r in range(1, cols + 1)]], [rows, 1], name='tile_cols')

# Shuffle to switch to a convention of (batch_size, height, width, depth).
t_cols_4d = tf.transpose(
tf.expand_dims(
tf.expand_dims(t_cols, 0), 3), [0, 2, 1, 3])
t_cols_4d = array_ops.transpose(
array_ops.expand_dims(array_ops.expand_dims(t_cols, 0), 3),
[0, 2, 1, 3])

# extract_glimpses from Row and Column Tensor, respectively.
# Switch order for glimpse_sizes and offsets to switch from (row, col)
# convention to tensorflows (height, width) convention.
t1 = tf.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
t2 = tf.constant([offsets[1], offsets[0]], shape=[1, 2])
glimpse_rows = (tf.transpose(
tf.image.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
glimpse_cols = (tf.transpose(
tf.image.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))
t1 = constant_op.constant([glimpse_sizes[1], glimpse_sizes[0]], shape=[2])
t2 = constant_op.constant([offsets[1], offsets[0]], shape=[1, 2])
glimpse_rows = (array_ops.transpose(
image_ops.extract_glimpse(t_rows_4d, t1, t2), [0, 2, 1, 3]))
glimpse_cols = (array_ops.transpose(
image_ops.extract_glimpse(t_cols_4d, t1, t2), [0, 2, 1, 3]))

# Evaluate the TensorFlow Graph.
with self.test_session() as sess:
@@ -108,83 +108,94 @@ class ExtractGlimpseTest(tf.test.TestCase):
self.assertEqual(value_cols[0][i][j][0], expected_cols[j])

def testCenterGlimpse(self):
self._VerifyValues(tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.0, 0.0],
expected_rows=[20, 21, 22],
expected_cols=[29, 30, 31, 32, 33])
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.0, 0.0],
expected_rows=[20, 21, 22],
expected_cols=[29, 30, 31, 32, 33])

def testEmptyTensor(self):
empty_image = np.zeros((0, 4, 3, 0))
offsets = np.zeros((0, 2))
with self.test_session():
result = tf.image.extract_glimpse(empty_image, [1, 1], offsets)
self.assertAllEqual(np.zeros((0, 1, 1, 0), dtype=np.float32),
result.eval())
result = image_ops.extract_glimpse(empty_image, [1, 1], offsets)
self.assertAllEqual(
np.zeros(
(0, 1, 1, 0), dtype=np.float32), result.eval())

def testLargeCenterGlimpse(self):
self._VerifyValues(tensor_in_sizes=[41, 61],
glimpse_sizes=[41, 61],
offsets=[0.0, 0.0],
expected_rows=list(range(1, 42)),
expected_cols=list(range(1, 62)))
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[41, 61],
offsets=[0.0, 0.0],
expected_rows=list(range(1, 42)),
expected_cols=list(range(1, 62)))

def testTooLargeCenterGlimpse(self):
self._VerifyValues(tensor_in_sizes=[41, 61],
glimpse_sizes=[43, 63],
offsets=[0.0, 0.0],
expected_rows=[None] + list(range(1, 42)) + [None],
expected_cols=[None] + list(range(1, 62)) + [None])
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[43, 63],
offsets=[0.0, 0.0],
expected_rows=[None] + list(range(1, 42)) + [None],
expected_cols=[None] + list(range(1, 62)) + [None])

def testGlimpseFullOverlap(self):
self._VerifyValues(tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.1, 0.3],
expected_rows=[22, 23, 24],
expected_cols=[38, 39, 40, 41, 42])
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[3, 5],
offsets=[0.1, 0.3],
expected_rows=[22, 23, 24],
expected_cols=[38, 39, 40, 41, 42])

def testGlimpseFullOverlap2(self):
self._VerifyValues(tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 3],
offsets=[-0.7, -0.7],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[8, 9, 10])
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 3],
offsets=[-0.7, -0.7],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[8, 9, 10])

def testGlimpseBeforeLeftMargin(self):
self._VerifyValues(tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 5],
offsets=[-0.7, -0.9],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[1, 2, 3, 4, 5])
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 5],
offsets=[-0.7, -0.9],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[1, 2, 3, 4, 5])

def testGlimpseLowerRightCorner(self):
self._VerifyValues(tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[1.0, 1.0],
expected_rows=[38, 39, 40, 41, None, None, None],
expected_cols=[59, 60, 61, None, None])
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[1.0, 1.0],
expected_rows=[38, 39, 40, 41, None, None, None],
expected_cols=[59, 60, 61, None, None])

def testGlimpseNoOverlap(self):
self._VerifyValues(tensor_in_sizes=[20, 30],
glimpse_sizes=[3, 3],
offsets=[-2.0, 2.0],
expected_rows=[None, None, None],
expected_cols=[None, None, None])
self._VerifyValues(
tensor_in_sizes=[20, 30],
glimpse_sizes=[3, 3],
offsets=[-2.0, 2.0],
expected_rows=[None, None, None],
expected_cols=[None, None, None])

def testGlimpseOnLeftMargin(self):
self._VerifyValues(tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 7],
offsets=[-0.7, -1.0],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[None, None, None, 1, 2, 3, 4])
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[11, 7],
offsets=[-0.7, -1.0],
expected_rows=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
expected_cols=[None, None, None, 1, 2, 3, 4])

def testGlimpseUpperMargin(self):
self._VerifyValues(tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[-1, 0.9],
expected_rows=[None, None, None, 1, 2, 3, 4],
expected_cols=[56, 57, 58, 59, 60])
self._VerifyValues(
tensor_in_sizes=[41, 61],
glimpse_sizes=[7, 5],
offsets=[-1, 0.9],
expected_rows=[None, None, None, 1, 2, 3, 4],
expected_cols=[56, 57, 58, 59, 60])


if __name__ == '__main__':
tf.test.main()
test.main()
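A small usage sketch of the op under test, assuming the TF 1.x API: by default offsets are normalized and centered, so (0.0, 0.0) extracts a glimpse around the image center, matching testCenterGlimpse above:

import numpy as np
import tensorflow as tf

image = tf.constant(
    np.arange(41 * 61, dtype=np.float32).reshape(1, 41, 61, 1))
size = tf.constant([3, 5], shape=[2])            # glimpse height, width
offsets = tf.constant([0.0, 0.0], shape=[1, 2])  # one offset per batch entry
glimpse = tf.image.extract_glimpse(image, size, offsets)
with tf.Session() as sess:
    print(sess.run(glimpse).shape)  # (1, 3, 5, 1)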
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for barrier ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@@ -21,21 +21,24 @@ from __future__ import print_function
import time

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test


class BarrierTest(tf.test.TestCase):
class BarrierTest(test.TestCase):

def testConstructorWithShapes(self):
with tf.Graph().as_default():
with ops.Graph().as_default():
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
(dtypes.float32, dtypes.float32),
shapes=((1, 2, 3), (8,)),
shared_name="B",
name="B")
self.assertTrue(isinstance(b.barrier_ref, tf.Tensor))
self.assertTrue(isinstance(b.barrier_ref, ops.Tensor))
self.assertProtoEquals("""
name:'B' op:'Barrier'
attr {
@@ -66,9 +69,7 @@ class BarrierTest(tf.test.TestCase):
def testInsertMany(self):
with self.test_session():
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
@@ -86,15 +87,12 @@ class BarrierTest(tf.test.TestCase):
error_message = ("Empty tensors are not supported, but received shape "
r"\'\(0,\)\' at index 1")
with self.assertRaisesRegexp(ValueError, error_message):
data_flow_ops.Barrier((tf.float32, tf.float32),
shapes=((1,), (0,)),
name="B")
data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((1,), (0,)), name="B")

def testInsertManyEmptyTensorUnknown(self):
with self.test_session():
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
name="B")
b = data_flow_ops.Barrier((dtypes.float32, dtypes.float32), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
@@ -107,9 +105,7 @@ class BarrierTest(tf.test.TestCase):
def testTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
@@ -122,8 +118,8 @@ class BarrierTest(tf.test.TestCase):
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])

indices_val, keys_val, values_0_val, values_1_val = sess.run([
take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])

self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
@@ -134,9 +130,7 @@ class BarrierTest(tf.test.TestCase):
def testTakeManySmallBatch(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
size_i = b.incomplete_size()
keys = [b"a", b"b", b"c", b"d"]
@@ -160,9 +154,8 @@ class BarrierTest(tf.test.TestCase):
# should return a reduced batch with 2 elements only.
self.assertEquals(size_i.eval(), [2]) # assert that incomplete size = 2
self.assertEquals(size_t.eval(), [2]) # assert that ready size = 2
_, keys_val, values_0_val, values_1_val = sess.run([
index_t, key_t, value_list_t[0], value_list_t[1]
])
_, keys_val, values_0_val, values_1_val = sess.run(
[index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
@@ -174,9 +167,8 @@ class BarrierTest(tf.test.TestCase):
insert_1_2_op.run()
self.assertEquals(size_i.eval(), [1]) # assert that incomplete size = 1
self.assertEquals(size_t.eval(), [1]) # assert that ready size = 1
_, keys_val, values_0_val, values_1_val = sess.run([
index_t, key_t, value_list_t[0], value_list_t[1]
])
_, keys_val, values_0_val, values_1_val = sess.run(
[index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[2:3], values_0[2:3], values_1[2:3]):
idx = keys_val.tolist().index(k)
@@ -202,15 +194,12 @@ class BarrierTest(tf.test.TestCase):
def testUseBarrierWithShape(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((2, 2), (8,)),
name="B")
(dtypes.float32, dtypes.float32), shapes=((2, 2), (8,)), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = np.array(
[[[10.0] * 2] * 2, [[20.0] * 2] * 2, [[30.0] * 2] * 2], np.float32)
values_1 = np.array([[100.0] * 8, [200.0] * 8, [300.0] * 8],
np.float32)
values_1 = np.array([[100.0] * 8, [200.0] * 8, [300.0] * 8], np.float32)
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
@@ -219,8 +208,8 @@ class BarrierTest(tf.test.TestCase):
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])

indices_val, keys_val, values_0_val, values_1_val = sess.run([
take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
self.assertShapeEqual(keys_val, take_t[1])
self.assertShapeEqual(values_0_val, take_t[2][0])
@@ -233,7 +222,7 @@ class BarrierTest(tf.test.TestCase):

def testParallelInsertMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(tf.float32, shapes=())
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
@@ -253,7 +242,7 @@ class BarrierTest(tf.test.TestCase):

def testParallelTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(tf.float32, shapes=())
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
@@ -283,12 +272,11 @@ class BarrierTest(tf.test.TestCase):
self.assertAllEqual(np.hstack(index_vals), [-2**63] * 10)

self.assertItemsEqual(
zip(keys, values),
[(k[0], v[0]) for k, v in zip(key_vals, value_vals)])
zip(keys, values), [(k[0], v[0]) for k, v in zip(key_vals, value_vals)])

def testBlockingTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(tf.float32, shapes=())
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
@@ -297,8 +285,8 @@ class BarrierTest(tf.test.TestCase):
def take():
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
self.assertAllEqual(
indices_val, [int(x.decode("ascii")) - 2**63 for x in keys_val])
self.assertAllEqual(indices_val,
[int(x.decode("ascii")) - 2**63 for x in keys_val])
self.assertItemsEqual(zip(keys, values), zip(keys_val, values_val))

t = self.checkedThread(target=take)
@@ -311,28 +299,32 @@ class BarrierTest(tf.test.TestCase):
def testParallelInsertManyTakeMany(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.int64), shapes=((), (2,)))
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x+1, x + 2] for x in range(10)], dtype=np.int64)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)]
for i in range(num_iterations)
]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)]
for i in range(num_iterations)
]
take_ops = [b.take_many(10) for _ in range(num_iterations)]

def take(sess, i, taken):
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_ops[i][0], take_ops[i][1],
take_ops[i][2][0], take_ops[i][2][1]])
taken.append({"indices": indices_val,
"keys": keys_val,
"values_0": values_0_val,
"values_1": values_1_val})
indices_val, keys_val, values_0_val, values_1_val = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0], take_ops[i][2][1]
])
taken.append({
"indices": indices_val,
"keys": keys_val,
"values_0": values_0_val,
"values_1": values_1_val
})

def insert(sess, i):
sess.run([insert_0_ops[i], insert_1_ops[i]])
@@ -340,11 +332,13 @@ class BarrierTest(tf.test.TestCase):
taken = []

take_threads = [
self.checkedThread(target=take, args=(sess, i, taken))
for i in range(num_iterations)]
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_threads = [
self.checkedThread(target=insert, args=(sess, i))
for i in range(num_iterations)]
self.checkedThread(
target=insert, args=(sess, i)) for i in range(num_iterations)
]

for t in take_threads:
t.start()
@@ -361,10 +355,10 @@ class BarrierTest(tf.test.TestCase):
all_indices = sorted(flatten([t_i["indices"] for t_i in taken]))
all_keys = sorted(flatten([t_i["keys"] for t_i in taken]))

expected_keys = sorted(flatten(
[keys_i(i) for i in range(num_iterations)]))
expected_indices = sorted(flatten(
[-2**63 + j] * 10 for j in range(num_iterations)))
expected_keys = sorted(
flatten([keys_i(i) for i in range(num_iterations)]))
expected_indices = sorted(
flatten([-2**63 + j] * 10 for j in range(num_iterations)))

self.assertAllEqual(all_indices, expected_indices)
self.assertAllEqual(all_keys, expected_keys)
@@ -384,9 +378,7 @@ class BarrierTest(tf.test.TestCase):
def testClose(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
@@ -422,9 +414,8 @@ class BarrierTest(tf.test.TestCase):

# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(
r"is closed and has insufficient elements "
r"\(requested 4, total size 3\)"):
with self.assertRaisesOpError(r"is closed and has insufficient elements "
r"\(requested 4, total size 3\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices

# This op should succeed because there are still completed elements
@@ -445,9 +436,7 @@ class BarrierTest(tf.test.TestCase):
def testCancel(self):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32),
shapes=((), ()),
name="B")
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
@@ -478,9 +467,8 @@ class BarrierTest(tf.test.TestCase):

# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(
r"is closed and has insufficient elements "
r"\(requested 3, total size 2\)"):
with self.assertRaisesOpError(r"is closed and has insufficient elements "
r"\(requested 3, total size 2\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices

# This op should succeed because there are still completed elements
@@ -501,7 +489,7 @@ class BarrierTest(tf.test.TestCase):
def _testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self, cancel):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.float32), shapes=((), ()), name="B")
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
take_t = b.take_many(1, allow_small_batch=True)
sess.run(b.close(cancel))
with self.assertRaisesOpError("is closed and has insufficient elements"):
@@ -514,7 +502,7 @@ class BarrierTest(tf.test.TestCase):
def _testParallelInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.int64), shapes=((), (2,)))
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 50
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
@@ -522,39 +510,44 @@ class BarrierTest(tf.test.TestCase):
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)]
for i in range(num_iterations)
]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)]
for i in range(num_iterations)
]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
close_op = b.close(cancel_pending_enqueues=cancel)

def take(sess, i, taken):
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
[take_ops[i][0], take_ops[i][1],
take_ops[i][2][0], take_ops[i][2][1]])
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
except tf.errors.OutOfRangeError:
except errors_impl.OutOfRangeError:
taken.append(0)

def insert(sess, i):
try:
sess.run([insert_0_ops[i], insert_1_ops[i]])
except tf.errors.CancelledError:
except errors_impl.CancelledError:
pass

taken = []

take_threads = [
self.checkedThread(target=take, args=(sess, i, taken))
for i in range(num_iterations)]
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_threads = [
self.checkedThread(target=insert, args=(sess, i))
for i in range(num_iterations)]
self.checkedThread(
target=insert, args=(sess, i)) for i in range(num_iterations)
]

first_half_insert_threads = insert_threads[:num_iterations//2]
second_half_insert_threads = insert_threads[num_iterations//2:]
first_half_insert_threads = insert_threads[:num_iterations // 2]
second_half_insert_threads = insert_threads[num_iterations // 2:]

for t in take_threads:
t.start()
@@ -573,7 +566,8 @@ class BarrierTest(tf.test.TestCase):
t.join()

self.assertEqual(
sorted(taken), [0] * (num_iterations//2) + [10] * (num_iterations//2))
sorted(taken),
[0] * (num_iterations // 2) + [10] * (num_iterations // 2))

def testParallelInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=False)
@@ -584,38 +578,47 @@ class BarrierTest(tf.test.TestCase):
def _testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.test_session() as sess:
b = data_flow_ops.Barrier(
(tf.float32, tf.int64), shapes=((), (2,)))
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i, name="insert_0_%d" % i)
for i in range(num_iterations)]
b.insert_many(
0, keys_i(i), values_0 + i, name="insert_0_%d" % i)
for i in range(num_iterations)
]

close_op = b.close(cancel_pending_enqueues=cancel)

take_ops = [b.take_many(10, name="take_%d" % i)
for i in range(num_iterations)]
take_ops = [
b.take_many(
10, name="take_%d" % i) for i in range(num_iterations)
]
# insert_1_ops will only run after closure
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i, name="insert_1_%d" % i)
for i in range(num_iterations)]
b.insert_many(
1, keys_i(i), values_1 + i, name="insert_1_%d" % i)
for i in range(num_iterations)
]

def take(sess, i, taken):
if cancel:
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
[take_ops[i][0], take_ops[i][1],
take_ops[i][2][0], take_ops[i][2][1]])
[
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
except tf.errors.OutOfRangeError:
except errors_impl.OutOfRangeError:
taken.append(0)
else:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
[take_ops[i][0], take_ops[i][1],
take_ops[i][2][0], take_ops[i][2][1]])
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))

def insert_0(sess, i):
@@ -625,7 +628,7 @@ class BarrierTest(tf.test.TestCase):
if cancel:
try:
insert_1_ops[i].run(session=sess)
except tf.errors.CancelledError:
except errors_impl.CancelledError:
pass
else:
insert_1_ops[i].run(session=sess)
@@ -633,14 +636,17 @@ class BarrierTest(tf.test.TestCase):
taken = []

take_threads = [
self.checkedThread(target=take, args=(sess, i, taken))
for i in range(num_iterations)]
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_0_threads = [
self.checkedThread(target=insert_0, args=(sess, i))
for i in range(num_iterations)]
self.checkedThread(
target=insert_0, args=(sess, i)) for i in range(num_iterations)
]
insert_1_threads = [
self.checkedThread(target=insert_1, args=(sess, i))
for i in range(num_iterations)]
self.checkedThread(
target=insert_1, args=(sess, i)) for i in range(num_iterations)
]

for t in insert_0_threads:
t.start()
@@ -672,39 +678,36 @@ class BarrierTest(tf.test.TestCase):
def testIncompatibleSharedBarrierErrors(self):
with self.test_session():
# Do component types and shapes.
b_a_1 = data_flow_ops.Barrier((tf.float32,), shapes=(()),
shared_name="b_a")
b_a_2 = data_flow_ops.Barrier((tf.int32,), shapes=(()),
shared_name="b_a")
b_a_1 = data_flow_ops.Barrier(
(dtypes.float32,), shapes=(()), shared_name="b_a")
b_a_2 = data_flow_ops.Barrier(
(dtypes.int32,), shapes=(()), shared_name="b_a")
b_a_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_a_2.barrier_ref.eval()

b_b_1 = data_flow_ops.Barrier((tf.float32,), shapes=(()),
shared_name="b_b")
b_b_1 = data_flow_ops.Barrier(
(dtypes.float32,), shapes=(()), shared_name="b_b")
b_b_2 = data_flow_ops.Barrier(
(tf.float32, tf.int32),
shapes=((), ()),
shared_name="b_b")
(dtypes.float32, dtypes.int32), shapes=((), ()), shared_name="b_b")
b_b_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_b_2.barrier_ref.eval()

b_c_1 = data_flow_ops.Barrier(
(tf.float32, tf.float32),
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_c")
b_c_2 = data_flow_ops.Barrier(
(tf.float32, tf.float32), shared_name="b_c")
(dtypes.float32, dtypes.float32), shared_name="b_c")
b_c_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_c_2.barrier_ref.eval()

b_d_1 = data_flow_ops.Barrier(
(tf.float32, tf.float32), shapes=((), ()),
shared_name="b_d")
(dtypes.float32, dtypes.float32), shapes=((), ()), shared_name="b_d")
b_d_2 = data_flow_ops.Barrier(
(tf.float32, tf.float32),
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_d")
b_d_1.barrier_ref.eval()
@@ -712,11 +715,11 @@ class BarrierTest(tf.test.TestCase):
b_d_2.barrier_ref.eval()

b_e_1 = data_flow_ops.Barrier(
(tf.float32, tf.float32),
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_e")
b_e_2 = data_flow_ops.Barrier(
(tf.float32, tf.float32),
(dtypes.float32, dtypes.float32),
shapes=((2, 5), (8,)),
shared_name="b_e")
b_e_1.barrier_ref.eval()
@@ -725,4 +728,4 @@ class BarrierTest(tf.test.TestCase):


if __name__ == "__main__":
tf.test.main()
test.main()
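A minimal sketch of the insert/take pattern these tests exercise, assuming the contrib-era data_flow_ops.Barrier API: an element becomes ready only once every component has been inserted under its key:

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

b = data_flow_ops.Barrier((tf.float32, tf.float32), shapes=((), ()))
insert_0 = b.insert_many(0, [b"k"], [1.0])  # component 0 of element "k"
insert_1 = b.insert_many(1, [b"k"], [2.0])  # component 1 completes "k"
take = b.take_many(1)  # returns (indices, keys, list of value tensors)
with tf.Session() as sess:
    sess.run([insert_0, insert_1])
    _, keys, values = sess.run([take[0], take[1], take[2]])
print(keys)  # [b'k']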
@@ -13,28 +13,36 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

import math
import numpy as np
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args

class GPUBinaryOpsTest(tf.test.TestCase):
import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test


class GPUBinaryOpsTest(test.TestCase):

def _compareGPU(self, x, y, np_func, tf_func):
with self.test_session(use_gpu=True) as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = sess.run(out)

with self.test_session(use_gpu=False) as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = sess.run(out)

@@ -43,43 +51,44 @@ class GPUBinaryOpsTest(tf.test.TestCase):
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, tf.add)
self._compareGPU(x, y, np.subtract, tf.sub)
self._compareGPU(x, y, np.multiply, tf.mul)
self._compareGPU(x, y + 0.1, np.true_divide, tf.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, tf.floordiv)
self._compareGPU(x, y, np.power, tf.pow)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.sub)
self._compareGPU(x, y, np.multiply, math_ops.mul)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)

def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, tf.add)
self._compareGPU(x, y, np.subtract, tf.sub)
self._compareGPU(x, y, np.multiply, tf.mul)
self._compareGPU(x, y + 0.1, np.true_divide, tf.truediv)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.sub)
self._compareGPU(x, y, np.multiply, math_ops.mul)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, tf.add)
self._compareGPU(x, y, np.subtract, tf.sub)
self._compareGPU(x, y, np.multiply, tf.mul)
self._compareGPU(x, y + 0.1, np.true_divide, tf.truediv)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.sub)
self._compareGPU(x, y, np.multiply, math_ops.mul)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, tf.add)
self._compareGPU(x, y, np.subtract, tf.sub)
self._compareGPU(x, y, np.multiply, tf.mul)
self._compareGPU(x, y + 0.1, np.true_divide, tf.truediv)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.sub)
self._compareGPU(x, y, np.multiply, math_ops.mul)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)


class MathBuiltinUnaryTest(tf.test.TestCase):
class MathBuiltinUnaryTest(test.TestCase):

def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.test_session(use_gpu=use_gpu) as sess:
inx = tf.convert_to_tensor(x)
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = sess.run(ofunc)
self.assertAllClose(np_out, tf_out)
@@ -92,44 +101,48 @@ class MathBuiltinUnaryTest(tf.test.TestCase):

def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
self._compare(data, np.abs, tf.abs, use_gpu)
self._compare(data, np.arccos, tf.acos, use_gpu)
self._compare(data, np.arcsin, tf.asin, use_gpu)
self._compare(data, np.arctan, tf.atan, use_gpu)
self._compare(data, np.ceil, tf.ceil, use_gpu)
self._compare(data, np.cos, tf.cos, use_gpu)
self._compare(data, np.exp, tf.exp, use_gpu)
self._compare(data, np.floor, tf.floor, use_gpu)
self._compare(data, np.log, tf.log, use_gpu)
self._compare(data, np.log1p, tf.log1p, use_gpu)
self._compare(data, np.negative, tf.neg, use_gpu)
self._compare(data, self._rsqrt, tf.rsqrt, use_gpu)
self._compare(data, np.sin, tf.sin, use_gpu)
self._compare(data, np.sqrt, tf.sqrt, use_gpu)
self._compare(data, np.square, tf.square, use_gpu)
self._compare(data, np.tan, tf.tan, use_gpu)
self._compare(data, np.tanh, tf.tanh, use_gpu)
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.neg, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)

def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)

def testFloorDevide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape([1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape([1, 3, 2])
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])

np_out = np.floor_divide(x, y + 0.1)

with self.test_session(use_gpu=True) as sess:
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y + 0.1)
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = tf.floor(ofunc)
out_func2 = math_ops.floor(ofunc)
tf_out = sess.run(out_func2)

self.assertAllClose(np_out, tf_out)

class BroadcastSimpleTest(tf.test.TestCase):

class BroadcastSimpleTest(test.TestCase):

def _GetGradientArgs(self, xs, ys):
with self.test_session(use_gpu=True) as sess:
return sess.run(_broadcast_gradient_args(xs, ys))
@@ -139,53 +152,55 @@ class BroadcastSimpleTest(tf.test.TestCase):
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])

_GRAD_TOL = {tf.float32: 1e-3}
_GRAD_TOL = {dtypes.float32: 1e-3}

def _compareGradientX(self, x, y, np_func, tf_func,
numeric_gradient_type=None):
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = tf.test.compute_gradient(inx,
xs,
out,
zs,
x_init_value=x)
tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

def _compareGradientY(self, x, y, np_func, tf_func,
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.test_session():
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = tf.test.compute_gradient(iny,
ys,
out,
zs,
x_init_value=y)
tol = self._GRAD_TOL[tf.as_dtype(x.dtype)]
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.test_session(use_gpu=True):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
@@ -193,13 +208,16 @@ class BroadcastSimpleTest(tf.test.TestCase):
# TODO(zhifengc/ke): make gradient checker work on GPU.

def testGradient(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape([1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape([1, 3, 2])
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])

self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)

self._compareGradientX(x , y, np.true_divide, tf.truediv)
self._compareGradientY(x, y, np.true_divide, tf.truediv)
self._compareGpu(x, y, np.true_divide, tf.truediv)
self._compareGpu(x, y +0.1 , np.floor_divide, tf.floordiv)

if __name__ == "__main__":
tf.test.main()
test.main()
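The floor-division check above (testFloorDevide, name spelled as in the original file) rests on floor(x / y) matching numpy's floor_divide elementwise. A standalone sketch, assuming the TF 1.x tf.* API:

import numpy as np
import tensorflow as tf

x = np.linspace(1.0, 6.0, 6).astype(np.float32).reshape(1, 3, 2)
y = x + 0.1
with tf.Session() as sess:
    tf_out = sess.run(tf.floor(tf.convert_to_tensor(x) / y))
print(np.allclose(tf_out, np.floor_divide(x, y)))  # expected: True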
@@ -13,15 +13,21 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.BatchMatMul."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class BatchMatmulOpTest(tf.test.TestCase):
class BatchMatmulOpTest(test.TestCase):

# Uses numpy to compute batch_matmul(x, y, adjoint_a, adjoint_b).
def _npBatchMatmul(self, x, y, adjoint_a, adjoint_b):
@@ -79,12 +85,13 @@ class BatchMatmulOpTest(tf.test.TestCase):
tol = 100 * np.finfo(x.dtype).eps if is_floating else 0
with self.test_session(use_gpu=is_floating) as sess:
if static_shape:
z0 = tf.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0 = math_ops.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = z0.eval()
else:
x_ph = tf.placeholder(x.dtype)
y_ph = tf.placeholder(y.dtype)
z0 = tf.matmul(x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
x_ph = array_ops.placeholder(x.dtype)
y_ph = array_ops.placeholder(y.dtype)
z0 = math_ops.matmul(
x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = sess.run(z0, feed_dict={x_ph: x, y_ph: y})
z1 = self._npBatchMatmul(x, y, adjoint_a, adjoint_b)
self.assertAllClose(z0_val, z1, rtol=tol, atol=tol)
@@ -135,7 +142,7 @@ def _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape):
return Test


class BatchMatmulGradientTest(tf.test.TestCase):
class BatchMatmulGradientTest(test.TestCase):

# loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the
# gradient checker.
@@ -147,12 +154,12 @@ class BatchMatmulGradientTest(tf.test.TestCase):
epsilon = np.finfo(x.dtype).eps
delta = epsilon**(1.0 / 3.0)
with self.test_session(use_gpu=True):
inx = tf.constant(x)
iny = tf.constant(y)
z = tf.matmul(inx, iny, adjoint_a, adjoint_b)
loss = tf.reduce_sum(z)
inx = constant_op.constant(x)
iny = constant_op.constant(y)
z = math_ops.matmul(inx, iny, adjoint_a, adjoint_b)
loss = math_ops.reduce_sum(z)
((x_jacob_t, x_jacob_n),
(y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
(y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
[inx, iny], [x.shape, y.shape],
loss, [1],
x_init_value=[x, y],
@@ -196,4 +203,4 @@ if __name__ == "__main__":
if dtype_ is not np.int32:
setattr(BatchMatmulGradientTest, "testBatchMatmulGradient_" + name,
_GetBatchMatmulGradientTest(dtype_, adjoint_a_, adjoint_b_))
tf.test.main()
test.main()
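For orientation, a numpy-only sketch of the batching scheme that the test's _npBatchMatmul helper implements: collapse the leading batch dimensions, multiply the 2-D slices one by one, and restore the batch shape. The name np_batch_matmul is hypothetical, introduced only for this sketch (adjoint handling omitted):

import numpy as np

def np_batch_matmul(x, y):
    # Collapse leading batch dims, matmul each 2-D slice, restore the shape.
    batch = x.shape[:-2]
    x2 = x.reshape((-1,) + x.shape[-2:])
    y2 = y.reshape((-1,) + y.shape[-2:])
    z = np.array([np.dot(a, b) for a, b in zip(x2, y2)])
    return z.reshape(batch + (x.shape[-2], y.shape[-1]))

x = np.random.rand(2, 3, 4).astype(np.float32)
y = np.random.rand(2, 4, 5).astype(np.float32)
print(np_batch_matmul(x, y).shape)  # (2, 3, 5)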
@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Functional tests for BatchToSpace op.

Additional tests are included in spacetobatch_op_test.py, where the BatchToSpace
@ -24,16 +23,20 @@ from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test


class PythonOpImpl(object):

@staticmethod
def batch_to_space(*args, **kwargs):
return tf.batch_to_space(*args, **kwargs)
return array_ops.batch_to_space(*args, **kwargs)


class CppOpImpl(object):
@ -43,7 +46,7 @@ class CppOpImpl(object):
return gen_array_ops._batch_to_space(*args, **kwargs)


class BatchToSpaceDepthToSpace(tf.test.TestCase, PythonOpImpl):
class BatchToSpaceDepthToSpace(test.TestCase, PythonOpImpl):

# Verifies that: batch_to_space(x) = transpose(depth_to_space(transpose(x)))
def testDepthToSpaceTranspose(self):
@ -51,10 +54,10 @@ class BatchToSpaceDepthToSpace(tf.test.TestCase, PythonOpImpl):
block_size = 2
crops = np.zeros((2, 2), dtype=np.int32)
y1 = self.batch_to_space(x, crops, block_size=block_size)
y2 = tf.transpose(
tf.depth_to_space(
tf.transpose(x, [3, 1, 2, 0]),
block_size=block_size), [3, 1, 2, 0])
y2 = array_ops.transpose(
array_ops.depth_to_space(
array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
[3, 1, 2, 0])
with self.test_session():
self.assertAllEqual(y1.eval(), y2.eval())

@ -63,7 +66,7 @@ class BatchToSpaceDepthToSpaceCpp(BatchToSpaceDepthToSpace, CppOpImpl):
pass


class BatchToSpaceErrorHandlingTest(tf.test.TestCase, PythonOpImpl):
class BatchToSpaceErrorHandlingTest(test.TestCase, PythonOpImpl):

def testInputWrongDimMissingBatch(self):
# The input is missing the first dimension ("batch")
@ -110,8 +113,8 @@ class BatchToSpaceErrorHandlingTest(tf.test.TestCase, PythonOpImpl):

def testUnknownShape(self):
t = self.batch_to_space(
tf.placeholder(tf.float32),
tf.placeholder(tf.int32),
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32),
block_size=4)
self.assertEqual(4, t.get_shape().ndims)

@ -121,7 +124,7 @@ class BatchToSpaceErrorHandlingCppTest(BatchToSpaceErrorHandlingTest,
pass


class BatchToSpaceNDErrorHandlingTest(tf.test.TestCase):
class BatchToSpaceNDErrorHandlingTest(test.TestCase):

def _testStaticShape(self, input_shape, block_shape, paddings, error):
block_shape = np.array(block_shape)
@ -129,7 +132,7 @@ class BatchToSpaceNDErrorHandlingTest(tf.test.TestCase):

# Try with sizes known at graph construction time.
with self.assertRaises(error):
_ = tf.batch_to_space_nd(
_ = array_ops.batch_to_space_nd(
np.zeros(input_shape, np.float32), block_shape, paddings)

def _testDynamicShape(self, input_shape, block_shape, paddings):
@ -137,16 +140,19 @@ class BatchToSpaceNDErrorHandlingTest(tf.test.TestCase):
paddings = np.array(paddings)

# Try with sizes unknown at graph construction time.
input_placeholder = tf.placeholder(tf.float32)
block_shape_placeholder = tf.placeholder(tf.int32, shape=block_shape.shape)
paddings_placeholder = tf.placeholder(tf.int32)
t = tf.batch_to_space_nd(input_placeholder, block_shape_placeholder,
paddings_placeholder)
input_placeholder = array_ops.placeholder(dtypes.float32)
block_shape_placeholder = array_ops.placeholder(
dtypes.int32, shape=block_shape.shape)
paddings_placeholder = array_ops.placeholder(dtypes.int32)
t = array_ops.batch_to_space_nd(input_placeholder, block_shape_placeholder,
paddings_placeholder)

with self.assertRaises(ValueError):
_ = t.eval({input_placeholder: np.zeros(input_shape, np.float32),
block_shape_placeholder: block_shape,
paddings_placeholder: paddings})
_ = t.eval({
input_placeholder: np.zeros(input_shape, np.float32),
block_shape_placeholder: block_shape,
paddings_placeholder: paddings
})

def _testShape(self, input_shape, block_shape, paddings, error):
self._testStaticShape(input_shape, block_shape, paddings, error)
@ -176,54 +182,62 @@ class BatchToSpaceNDErrorHandlingTest(tf.test.TestCase):

def testUnknownShape(self):
# Verify that input shape and paddings shape can be unknown.
_ = tf.batch_to_space_nd(
tf.placeholder(tf.float32),
tf.placeholder(tf.int32, shape=(2,)),
tf.placeholder(tf.int32))
_ = array_ops.batch_to_space_nd(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))

# Only number of input dimensions is known.
t = tf.batch_to_space_nd(
tf.placeholder(tf.float32, shape=(None, None, None, None)),
tf.placeholder(tf.int32, shape=(2,)),
tf.placeholder(tf.int32))
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, None)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual(4, t.get_shape().ndims)

# Dimensions are partially known.
t = tf.batch_to_space_nd(
tf.placeholder(tf.float32, shape=(None, None, None, 2)),
tf.placeholder(tf.int32, shape=(2,)),
tf.placeholder(tf.int32))
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(None, None, None, 2)),
array_ops.placeholder(
dtypes.int32, shape=(2,)),
array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, 2], t.get_shape().as_list())

# Dimensions are partially known.
t = tf.batch_to_space_nd(
tf.placeholder(tf.float32, shape=(3 * 2 * 3, None, None, 2)), [2, 3],
tf.placeholder(tf.int32))
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(3 * 2 * 3, None, None, 2)), [2, 3],
array_ops.placeholder(dtypes.int32))
self.assertEqual([3, None, None, 2], t.get_shape().as_list())

# Dimensions are partially known.
t = tf.batch_to_space_nd(
tf.placeholder(tf.float32, shape=(3 * 2 * 3, None, 2, 2)), [2, 3],
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(3 * 2 * 3, None, 2, 2)), [2, 3],
[[1, 1], [0, 1]])
self.assertEqual([3, None, 5, 2], t.get_shape().as_list())

# Dimensions are fully known.
t = tf.batch_to_space_nd(
tf.placeholder(tf.float32, shape=(3 * 2 * 3, 2, 1, 2)), [2, 3],
t = array_ops.batch_to_space_nd(
array_ops.placeholder(
dtypes.float32, shape=(3 * 2 * 3, 2, 1, 2)), [2, 3],
[[1, 1], [0, 0]])
self.assertEqual([3, 2, 3, 2], t.get_shape().as_list())


class BatchToSpaceGradientTest(tf.test.TestCase, PythonOpImpl):
class BatchToSpaceGradientTest(test.TestCase, PythonOpImpl):

# Check the gradients.
def _checkGrad(self, x, crops, block_size):
assert 4 == x.ndim
with self.test_session():
tf_x = tf.convert_to_tensor(x)
tf_x = ops.convert_to_tensor(x)
tf_y = self.batch_to_space(tf_x, crops, block_size)
epsilon = 1e-5
((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
@ -240,8 +254,8 @@ class BatchToSpaceGradientTest(tf.test.TestCase, PythonOpImpl):
x = np.random.normal(0, 1, b * h * w * d *
block_size_sq).astype(np.float32).reshape(
[b * block_size * block_size, h, w, d])
crops = np.array([[crop_beg, crop_end], [crop_beg, crop_end]],
dtype=np.int32)
crops = np.array(
[[crop_beg, crop_end], [crop_beg, crop_end]], dtype=np.int32)

self._checkGrad(x, crops, block_size)

@ -270,17 +284,17 @@ class BatchToSpaceGradientCppTest(BatchToSpaceGradientTest, CppOpImpl):
pass


class BatchToSpaceNDGradientTest(tf.test.TestCase):
class BatchToSpaceNDGradientTest(test.TestCase):

# Check the gradients.
def _checkGrad(self, x, block_shape, crops):
block_shape = np.array(block_shape)
crops = np.array(crops).reshape((len(block_shape), 2))
with self.test_session():
tf_x = tf.convert_to_tensor(x)
tf_y = tf.batch_to_space_nd(tf_x, block_shape, crops)
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.batch_to_space_nd(tf_x, block_shape, crops)
epsilon = 1e-5
((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
@ -310,4 +324,4 @@ class BatchToSpaceNDGradientTest(tf.test.TestCase):


if __name__ == "__main__":
tf.test.main()
test.main()

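The _checkGrad helpers above lean on gradient_checker.compute_gradient, which returns a pair of Jacobians: one computed by backprop through the graph, one by finite differences; a test passes when the two agree. A hedged sketch of how that check is typically wired up (the tensor values and shapes here are illustrative only):

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker

with session.Session():
  x = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
  y = x * x
  # jacob_t comes from backprop, jacob_n from finite differences; the check
  # passes when the two agree to within the chosen tolerance.
  jacob_t, jacob_n = gradient_checker.compute_gradient(x, [2, 2], y, [2, 2])
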
@ -12,19 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.kernels.bcast_ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test


class BcastOpsTest(tf.test.TestCase):
class BcastOpsTest(test.TestCase):

def _GetGradientArgs(self, xs, ys):
with self.test_session() as sess:
@ -90,4 +88,4 @@ class BcastOpsTest(tf.test.TestCase):


if __name__ == "__main__":
tf.test.main()
test.main()

@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
@ -22,11 +21,12 @@ import json
import os
import random

import tensorflow as tf

from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import benchmark

from tensorflow.python.platform import gfile
from tensorflow.python.platform import test

# Used by SomeRandomBenchmark class below.
_ran_somebenchmark_1 = [False]
@ -34,7 +34,7 @@ _ran_somebenchmark_2 = [False]
_ran_somebenchmark_but_shouldnt = [False]


class SomeRandomBenchmark(tf.test.Benchmark):
class SomeRandomBenchmark(test.Benchmark):
"""This Benchmark should automatically be registered in the registry."""

def _dontRunThisBenchmark(self):
@ -50,7 +50,7 @@ class SomeRandomBenchmark(tf.test.Benchmark):
_ran_somebenchmark_2[0] = True


class TestReportingBenchmark(tf.test.Benchmark):
class TestReportingBenchmark(test.Benchmark):
"""This benchmark (maybe) reports some stuff."""

def benchmarkReport1(self):
@ -58,19 +58,20 @@ class TestReportingBenchmark(tf.test.Benchmark):

def benchmarkReport2(self):
self.report_benchmark(
iters=2, name="custom_benchmark_name",
extras={"number_key": 3, "other_key": "string"})
iters=2,
name="custom_benchmark_name",
extras={"number_key": 3,
"other_key": "string"})

def benchmark_times_an_op(self):
with tf.Session() as sess:
a = tf.constant(0.0)
with session.Session() as sess:
a = constant_op.constant(0.0)
a_plus_a = a + a
self.run_op_benchmark(
sess, a_plus_a, min_iters=1000, store_trace=True,
name="op_benchmark")
sess, a_plus_a, min_iters=1000, store_trace=True, name="op_benchmark")


class BenchmarkTest(tf.test.TestCase):
class BenchmarkTest(test.TestCase):

def testGlobalBenchmarkRegistry(self):
registry = list(benchmark.GLOBAL_BENCHMARK_REGISTRY)
@ -116,30 +117,30 @@ class BenchmarkTest(tf.test.TestCase):
self.assertFalse(_ran_somebenchmark_but_shouldnt[0])

def testReportingBenchmark(self):
tempdir = tf.test.get_temp_dir()
tempdir = test.get_temp_dir()
try:
tf.gfile.MakeDirs(tempdir)
gfile.MakeDirs(tempdir)
except OSError as e:
# It's OK if the directory already exists.
if " exists:" not in str(e):
raise e

prefix = os.path.join(
tempdir, "reporting_bench_%016x_" % random.getrandbits(64))
expected_output_file = "%s%s" % (
prefix, "TestReportingBenchmark.benchmarkReport1")
prefix = os.path.join(tempdir,
"reporting_bench_%016x_" % random.getrandbits(64))
expected_output_file = "%s%s" % (prefix,
"TestReportingBenchmark.benchmarkReport1")
expected_output_file_2 = "%s%s" % (
prefix, "TestReportingBenchmark.custom_benchmark_name")
expected_output_file_3 = "%s%s" % (
prefix, "TestReportingBenchmark.op_benchmark")
expected_output_file_3 = "%s%s" % (prefix,
"TestReportingBenchmark.op_benchmark")
try:
self.assertFalse(tf.gfile.Exists(expected_output_file))
self.assertFalse(gfile.Exists(expected_output_file))
# Run benchmark but without env, shouldn't write anything
if benchmark.TEST_REPORTER_TEST_ENV in os.environ:
del os.environ[benchmark.TEST_REPORTER_TEST_ENV]
reporting = TestReportingBenchmark()
reporting.benchmarkReport1() # This should run without writing anything
self.assertFalse(tf.gfile.Exists(expected_output_file))
self.assertFalse(gfile.Exists(expected_output_file))

# Run benchmark with env, should write
os.environ[benchmark.TEST_REPORTER_TEST_ENV] = prefix
@ -150,9 +151,9 @@ class BenchmarkTest(tf.test.TestCase):
reporting.benchmark_times_an_op() # This should write

# Check the files were written
self.assertTrue(tf.gfile.Exists(expected_output_file))
self.assertTrue(tf.gfile.Exists(expected_output_file_2))
self.assertTrue(tf.gfile.Exists(expected_output_file_3))
self.assertTrue(gfile.Exists(expected_output_file))
self.assertTrue(gfile.Exists(expected_output_file_2))
self.assertTrue(gfile.Exists(expected_output_file_3))

# Check the contents are correct
expected_1 = test_log_pb2.BenchmarkEntry()
@ -170,7 +171,7 @@ class BenchmarkTest(tf.test.TestCase):
expected_3.iters = 1000

def read_benchmark_entry(f):
s = tf.gfile.GFile(f, "rb").read()
s = gfile.GFile(f, "rb").read()
entries = test_log_pb2.BenchmarkEntries.FromString(s)
self.assertEquals(1, len(entries.entry))
return entries.entry[0]
@ -191,8 +192,8 @@ class BenchmarkTest(tf.test.TestCase):
self.assertTrue("traceEvents" in json_trace.keys())

finally:
tf.gfile.DeleteRecursively(tempdir)
gfile.DeleteRecursively(tempdir)


if __name__ == "__main__":
tf.test.main()
test.main()

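For reference, the reporting flow this test exercises: a test.Benchmark subclass registers itself in the global registry, and report_benchmark only serializes a BenchmarkEntries proto to disk when benchmark.TEST_REPORTER_TEST_ENV names a file prefix. A minimal sketch under those assumptions (the class name and numbers are hypothetical):

from tensorflow.python.platform import test

class TinyBenchmark(test.Benchmark):  # hypothetical example class

  def benchmarkNothingMuch(self):
    # Without TEST_REPORTER_TEST_ENV set this only records in memory; with it
    # set, an entry file named after the benchmark is written under the prefix.
    self.report_benchmark(iters=10, name="nothing_much")
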
@ -13,6 +13,7 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@ -20,10 +21,16 @@ from __future__ import print_function
import itertools

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


class BetaincTest(tf.test.TestCase):
class BetaincTest(test.TestCase):
use_gpu = False

def _testBetaInc(self, dtype):
@ -36,24 +43,23 @@ class BetaincTest(tf.test.TestCase):
b_s = np.abs(np.random.randn(10, 10) * 30).astype(np_dt) # in (0, infty)
x_s = np.random.rand(10, 10).astype(np_dt) # in (0, 1)
with self.test_session(use_gpu=self.use_gpu):
tf_a_s = tf.constant(a_s, dtype=dtype)
tf_b_s = tf.constant(b_s, dtype=dtype)
tf_x_s = tf.constant(x_s, dtype=dtype)
tf_out = tf.betainc(tf_a_s, tf_b_s, tf_x_s).eval()
tf_a_s = constant_op.constant(a_s, dtype=dtype)
tf_b_s = constant_op.constant(b_s, dtype=dtype)
tf_x_s = constant_op.constant(x_s, dtype=dtype)
tf_out = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s).eval()
scipy_out = special.betainc(a_s, b_s, x_s).astype(np_dt)

# the scipy version of betainc uses a double-only implementation.
# TODO(ebrevdo): identify reasons for (sometime) precision loss
# with doubles
tol = 1e-4 if dtype == tf.float32 else 5e-5
tol = 1e-4 if dtype == dtypes.float32 else 5e-5
self.assertAllCloseAccordingToType(scipy_out, tf_out, rtol=tol, atol=tol)

# Test out-of-range values (most should return nan output)
combinations = list(itertools.product([-1, 0, 0.5, 1.0, 1.5], repeat=3))
a_comb, b_comb, x_comb = np.asarray(
list(zip(*combinations)), dtype=np_dt)
a_comb, b_comb, x_comb = np.asarray(list(zip(*combinations)), dtype=np_dt)
with self.test_session(use_gpu=self.use_gpu):
tf_comb = tf.betainc(a_comb, b_comb, x_comb).eval()
tf_comb = math_ops.betainc(a_comb, b_comb, x_comb).eval()
scipy_comb = special.betainc(a_comb, b_comb, x_comb).astype(np_dt)
self.assertAllCloseAccordingToType(scipy_comb, tf_comb)

@ -61,43 +67,56 @@ class BetaincTest(tf.test.TestCase):
with self.test_session(use_gpu=self.use_gpu):
self.assertAllCloseAccordingToType(
special.betainc(0.1, b_s, x_s).astype(np_dt),
tf.betainc(0.1, b_s, x_s).eval(), rtol=tol, atol=tol)
math_ops.betainc(0.1, b_s, x_s).eval(),
rtol=tol,
atol=tol)
self.assertAllCloseAccordingToType(
special.betainc(a_s, 0.1, x_s).astype(np_dt),
tf.betainc(a_s, 0.1, x_s).eval(), rtol=tol, atol=tol)
math_ops.betainc(a_s, 0.1, x_s).eval(),
rtol=tol,
atol=tol)
self.assertAllCloseAccordingToType(
special.betainc(a_s, b_s, 0.1).astype(np_dt),
tf.betainc(a_s, b_s, 0.1).eval(), rtol=tol, atol=tol)
math_ops.betainc(a_s, b_s, 0.1).eval(),
rtol=tol,
atol=tol)
self.assertAllCloseAccordingToType(
special.betainc(0.1, b_s, 0.1).astype(np_dt),
tf.betainc(0.1, b_s, 0.1).eval(), rtol=tol, atol=tol)
math_ops.betainc(0.1, b_s, 0.1).eval(),
rtol=tol,
atol=tol)
self.assertAllCloseAccordingToType(
special.betainc(0.1, 0.1, 0.1).astype(np_dt),
tf.betainc(0.1, 0.1, 0.1).eval(), rtol=tol, atol=tol)
math_ops.betainc(0.1, 0.1, 0.1).eval(),
rtol=tol,
atol=tol)

with self.assertRaisesRegexp(ValueError, "must be equal"):
tf.betainc(0.5, [0.5], [[0.5]])
math_ops.betainc(0.5, [0.5], [[0.5]])

with self.test_session(use_gpu=self.use_gpu):
with self.assertRaisesOpError("Shapes of .* are inconsistent"):
a_p = tf.placeholder(dtype)
b_p = tf.placeholder(dtype)
x_p = tf.placeholder(dtype)
tf.betainc(a_p, b_p, x_p).eval(
feed_dict={a_p: 0.5, b_p: [0.5], x_p: [[0.5]]})
a_p = array_ops.placeholder(dtype)
b_p = array_ops.placeholder(dtype)
x_p = array_ops.placeholder(dtype)
math_ops.betainc(a_p, b_p, x_p).eval(
feed_dict={a_p: 0.5,
b_p: [0.5],
x_p: [[0.5]]})

except ImportError as e:
tf.logging.warn("Cannot test special functions: %s" % str(e))
tf_logging.warn("Cannot test special functions: %s" % str(e))

def testBetaIncFloat(self):
self._testBetaInc(tf.float32)
self._testBetaInc(dtypes.float32)

def testBetaIncDouble(self):
self._testBetaInc(tf.float64)
self._testBetaInc(dtypes.float64)


class BetaincTestGPU(BetaincTest):
use_gpu = True


if __name__ == "__main__":
tf.test.main()
test.main()

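The oracle in these tests is scipy.special.betainc, the regularized incomplete beta function I_x(a, b), which math_ops.betainc computes elementwise. A small sanity check of the oracle itself, independent of TensorFlow (the argument values here are illustrative):

import numpy as np
from scipy import special

# By the symmetry I_x(a, b) = 1 - I_{1-x}(b, a), I_{0.5}(a, a) = 0.5.
assert np.isclose(special.betainc(2.0, 2.0, 0.5), 0.5)
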
@ -12,14 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Functional tests for BiasAdd."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test


def GetTestConfigs():
@ -29,31 +37,32 @@ def GetTestConfigs():
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NHWC", False), ("NHWC", True)]
if tf.test.is_gpu_available(cuda_only=True):
if test.is_gpu_available(cuda_only=True):
# "NCHW" format is currently only supported on CUDA.
test_configs += [("NCHW", True)]
return test_configs


class BiasAddTest(tf.test.TestCase):
class BiasAddTest(test.TestCase):

def _npBias(self, inputs, bias):
assert len(bias.shape) == 1
print(inputs.shape)
print(bias.shape)
assert inputs.shape[-1] == bias.shape[0]
return inputs + bias.reshape(([1] * (len(inputs.shape) - 1))
+ [bias.shape[0]])
return inputs + bias.reshape(([1] * (len(inputs.shape) - 1)) +
[bias.shape[0]])

def testNpBias(self):
self.assertAllClose(np.array([[11, 22, 33], [41, 52, 63]]),
self._npBias(np.array([[10, 20, 30], [40, 50, 60]]),
np.array([1, 2, 3])))
self.assertAllClose(
np.array([[11, 22, 33], [41, 52, 63]]),
self._npBias(
np.array([[10, 20, 30], [40, 50, 60]]), np.array([1, 2, 3])))

def _testBias(self, np_inputs, np_bias, use_gpu=False):
np_val = self._npBias(np_inputs, np_bias)
with self.test_session(use_gpu=use_gpu):
tf_val = tf.nn.bias_add(np_inputs, np_bias).eval()
tf_val = nn_ops.bias_add(np_inputs, np_bias).eval()
self.assertAllCloseAccordingToType(np_val, tf_val)

def _AtLeast3d(self, np_value):
@ -81,7 +90,7 @@ class BiasAddTest(tf.test.TestCase):
np_val = self._npBias(np_inputs, np_bias)
np_inputs = self._NHWCToNCHW(np_inputs)
with self.test_session(use_gpu=use_gpu):
tf_val = tf.nn.bias_add(np_inputs, np_bias, data_format="NCHW").eval()
tf_val = nn_ops.bias_add(np_inputs, np_bias, data_format="NCHW").eval()
tf_val = self._NCHWToNHWC(tf_val)
self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)

@ -89,75 +98,90 @@ class BiasAddTest(tf.test.TestCase):
self._testBias(np_inputs, np_bias, use_gpu=False)
if np_inputs.dtype in [np.float16, np.float32, np.float64]:
self._testBias(np_inputs, np_bias, use_gpu=True)
if tf.test.is_gpu_available(cuda_only=True):
if test.is_gpu_available(cuda_only=True):
self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)

def testInputDims(self):
with self.assertRaises(ValueError):
tf.nn.bias_add([1, 2], [1])
nn_ops.bias_add([1, 2], [1])

def testBiasVec(self):
with self.assertRaises(ValueError):
tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1, 2], shape=[1, 2]))
nn_ops.bias_add(
array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[1, 2]))

def testBiasInputsMatch(self):
with self.assertRaises(ValueError):
tf.nn.bias_add(tf.reshape([1, 2], shape=[1, 2]),
tf.reshape([1], shape=[1]))
nn_ops.bias_add(
array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1], shape=[1]))

def testIntTypes(self):
for t in [np.int8, np.int16, np.int32, np.int64]:
self._testAll(np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
np.array([1, 2, 3]).astype(t))
self._testAll(
np.array([[10, 20, 30], [40, 50, 60]]).astype(t),
np.array([1, 2, 3]).astype(t))

def testFloatTypes(self):
for t in [np.float16, np.float32, np.float64]:
self._testAll(np.random.rand(4, 3, 3).astype(t),
np.random.rand(3).astype(t))
self._testAll(
np.random.rand(4, 3, 3).astype(t), np.random.rand(3).astype(t))

def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
with self.test_session(use_gpu=use_gpu):
if data_format == "NCHW":
np_input = self._NHWCToNCHW(np_input)
input_tensor = tf.constant(np_input, shape=np_input.shape, dtype=dtype)
bias_tensor = tf.constant(bias, shape=bias.shape, dtype=dtype)
output_tensor = tf.nn.bias_add(input_tensor, bias_tensor,
data_format=data_format)
tensor_jacob_t, tensor_jacob_n = tf.test.compute_gradient(
input_tensor = constant_op.constant(
np_input, shape=np_input.shape, dtype=dtype)
bias_tensor = constant_op.constant(bias, shape=bias.shape, dtype=dtype)
output_tensor = nn_ops.bias_add(
input_tensor, bias_tensor, data_format=data_format)
tensor_jacob_t, tensor_jacob_n = gradient_checker.compute_gradient(
input_tensor, np_input.shape, output_tensor, np_input.shape)
bias_jacob_t, bias_jacob_n = tf.test.compute_gradient(
bias_jacob_t, bias_jacob_n = gradient_checker.compute_gradient(
bias_tensor, bias.shape, output_tensor, np_input.shape)


# Test gradient of BiasAddGrad
bias_add_grad = tf.gradients(tf.nn.l2_loss(output_tensor),
bias_tensor)[0]
grad_jacob_t, grad_jacob_n = tf.test.compute_gradient(
bias_add_grad = gradients_impl.gradients(
nn_ops.l2_loss(output_tensor), bias_tensor)[0]
grad_jacob_t, grad_jacob_n = gradient_checker.compute_gradient(
output_tensor, np_input.shape, bias_add_grad, bias.shape)


if dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
input_tensor = tf.constant(np_input, shape=np_input.shape,
dtype=np.float32)
bias_tensor = tf.constant(bias, shape=bias.shape, dtype=np.float32)
output_tensor = tf.nn.bias_add(input_tensor, bias_tensor,
data_format=data_format)
_, tensor_jacob_n = tf.test.compute_gradient(
input_tensor, np_input.shape, output_tensor, np_input.shape)
_, bias_jacob_n = tf.test.compute_gradient(
bias_tensor, bias.shape, output_tensor, np_input.shape)

bias_add_grad = tf.gradients(tf.nn.l2_loss(output_tensor),
bias_tensor)[0]
_, grad_jacob_n = tf.test.compute_gradient(
output_tensor, np_input.shape, bias_add_grad, bias.shape)

input_tensor = constant_op.constant(
np_input, shape=np_input.shape, dtype=np.float32)
bias_tensor = constant_op.constant(
bias, shape=bias.shape, dtype=np.float32)
output_tensor = nn_ops.bias_add(
input_tensor, bias_tensor, data_format=data_format)
_, tensor_jacob_n = gradient_checker.compute_gradient(input_tensor,
np_input.shape,
output_tensor,
np_input.shape)
_, bias_jacob_n = gradient_checker.compute_gradient(bias_tensor,
bias.shape,
output_tensor,
np_input.shape)

bias_add_grad = gradients_impl.gradients(
nn_ops.l2_loss(output_tensor), bias_tensor)[0]
_, grad_jacob_n = gradient_checker.compute_gradient(output_tensor,
np_input.shape,
bias_add_grad,
bias.shape)

threshold = 2e-3
if dtype == tf.float64:
if dtype == dtypes.float64:
threshold = 1e-10
self.assertAllClose(tensor_jacob_t, tensor_jacob_n, threshold, threshold)
self.assertAllClose(bias_jacob_t, bias_jacob_n, threshold, threshold)
@ -165,17 +189,19 @@ class BiasAddTest(tf.test.TestCase):

def testGradientTensor(self):
for (data_format, use_gpu) in GetTestConfigs():
for dtype in (tf.float16, tf.float32, tf.float64):
np_input = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
dtype=dtype.as_numpy_dtype).reshape(3, 2)
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.array(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
dtype=dtype.as_numpy_dtype).reshape(3, 2)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)

def testGradientTensor4D(self):
for (data_format, use_gpu) in GetTestConfigs():
for dtype in (tf.float16, tf.float32, tf.float64):
np_input = np.arange(1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape(
[2, 3, 4, 2]).astype(np.float32)
for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
np_input = np.arange(
1.0, 49.0, dtype=dtype.as_numpy_dtype).reshape(
[2, 3, 4, 2]).astype(np.float32)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)

@ -187,9 +213,10 @@ class BiasAddTest(tf.test.TestCase):
def testEmptyGradient(self):
for data_format, use_gpu in GetTestConfigs():
for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
self._testGradient(np.random.randn(*shape), np.random.randn(shape[-1]),
tf.float64, data_format, use_gpu)
self._testGradient(
np.random.randn(*shape),
np.random.randn(shape[-1]), dtypes.float64, data_format, use_gpu)


if __name__ == "__main__":
tf.test.main()
test.main()

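The _NHWCToNCHW and _NCHWToNHWC helpers referenced above are, presumably, simple axis permutations between the two layouts; a sketch of the transposes involved (shapes are illustrative, and the helper implementations are an assumption since they are outside this hunk):

import numpy as np

x_nhwc = np.zeros((2, 3, 4, 5))              # batch, height, width, channels
x_nchw = np.transpose(x_nhwc, (0, 3, 1, 2))  # batch, channels, height, width
assert x_nchw.shape == (2, 5, 3, 4)
back = np.transpose(x_nchw, (0, 2, 3, 1))    # inverse permutation
assert back.shape == x_nhwc.shape
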
@ -13,20 +13,23 @@
# limitations under the License.
# ==============================================================================
"""Tests for tf.bitcast."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


import numpy as np
import tensorflow as tf

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class BitcastTest(tf.test.TestCase):
class BitcastTest(test.TestCase):

def _testBitcast(self, x, datatype, shape):
with self.test_session():
tf_ans = tf.bitcast(x, datatype)
tf_ans = array_ops.bitcast(x, datatype)
out = tf_ans.eval()
buff_after = memoryview(out).tobytes()
buff_before = memoryview(x).tobytes()
@ -36,13 +39,13 @@ class BitcastTest(tf.test.TestCase):

def testSmaller(self):
x = np.random.rand(3, 2)
datatype = tf.int8
datatype = dtypes.int8
shape = [3, 2, 8]
self._testBitcast(x, datatype, shape)

def testLarger(self):
x = np.arange(16, dtype=np.int8).reshape([4, 4])
datatype = tf.int32
datatype = dtypes.int32
shape = [4]
self._testBitcast(x, datatype, shape)

@ -54,25 +57,25 @@ class BitcastTest(tf.test.TestCase):
def testSameSize(self):
x = np.random.rand(3, 4)
shape = [3, 4]
self._testBitcast(x, tf.int64, shape)
self._testBitcast(x, dtypes.int64, shape)

def testErrors(self):
x = np.zeros([1, 1], np.int8)
datatype = tf.int32
datatype = dtypes.int32
with self.assertRaisesRegexp(ValueError, "Cannot bitcast due to shape"):
tf.bitcast(x, datatype, None)
array_ops.bitcast(x, datatype, None)

def testEmpty(self):
x = np.ones([], np.int32)
datatype = tf.int8
datatype = dtypes.int8
shape = [4]
self._testBitcast(x, datatype, shape)

def testUnknown(self):
x = tf.placeholder(tf.float32)
datatype = tf.int8
tf.bitcast(x, datatype, None)
x = array_ops.placeholder(dtypes.float32)
datatype = dtypes.int8
array_ops.bitcast(x, datatype, None)


if __name__ == "__main__":
tf.test.main()
test.main()

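The shape expectations in testSmaller and testLarger follow from bitcast's byte-level rule: casting to a narrower dtype appends a trailing dimension of sizeof(src) // sizeof(dst), and casting to a wider dtype consumes the last dimension. The same arithmetic reproduced in plain numpy (a sketch, not the op itself):

import numpy as np

# float64 (8 bytes) -> int8 (1 byte): a (3, 2) tensor gains a trailing 8.
x = np.random.rand(3, 2)
assert x.view(np.int8).reshape(3, 2, 8).shape == (3, 2, 8)

# int8 -> int32 (4 bytes): a (4, 4) tensor's last dimension collapses.
y = np.arange(16, dtype=np.int8).reshape(4, 4)
assert y.view(np.int32).reshape(4).shape == (4,)
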
@ -12,17 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for CandidateSamplerOp."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class RangeSamplerOpsTest(tf.test.TestCase):
class RangeSamplerOpsTest(test.TestCase):

BATCH_SIZE = 3
NUM_TRUE = 2
@ -33,9 +39,9 @@ class RangeSamplerOpsTest(tf.test.TestCase):

def testTrueCandidates(self):
with self.test_session() as sess:
indices = tf.constant([0, 0, 1, 1, 2, 2])
true_candidates_vec = tf.constant([1, 2, 0, 4, 3, 3])
true_candidates_matrix = tf.reshape(
indices = constant_op.constant([0, 0, 1, 1, 2, 2])
true_candidates_vec = constant_op.constant([1, 2, 0, 4, 3, 3])
true_candidates_matrix = array_ops.reshape(
true_candidates_vec, [self.BATCH_SIZE, self.NUM_TRUE])
indices_val, true_candidates_val = sess.run(
[indices, true_candidates_matrix])
@ -45,9 +51,9 @@ class RangeSamplerOpsTest(tf.test.TestCase):

def testSampledCandidates(self):
with self.test_session():
true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
dtype=tf.int64)
sampled_candidates, _, _ = tf.nn.all_candidate_sampler(
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
result = sampled_candidates.eval()

@ -57,26 +63,26 @@ class RangeSamplerOpsTest(tf.test.TestCase):

def testTrueLogExpectedCount(self):
with self.test_session():
true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
dtype=tf.int64)
_, true_expected_count, _ = tf.nn.all_candidate_sampler(
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
_, true_expected_count, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
true_log_expected_count = tf.log(true_expected_count)
true_log_expected_count = math_ops.log(true_expected_count)
result = true_log_expected_count.eval()

self.assertAllEqual(result, [[0.0] * self.NUM_TRUE] * self.BATCH_SIZE)
self.assertEqual(true_expected_count.get_shape(), [self.BATCH_SIZE,
self.NUM_TRUE])
self.assertEqual(true_log_expected_count.get_shape(), [self.BATCH_SIZE,
self.NUM_TRUE])
self.assertEqual(true_expected_count.get_shape(),
[self.BATCH_SIZE, self.NUM_TRUE])
self.assertEqual(true_log_expected_count.get_shape(),
[self.BATCH_SIZE, self.NUM_TRUE])

def testSampledLogExpectedCount(self):
with self.test_session():
true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
dtype=tf.int64)
_, _, sampled_expected_count = tf.nn.all_candidate_sampler(
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
_, _, sampled_expected_count = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
sampled_log_expected_count = tf.log(sampled_expected_count)
sampled_log_expected_count = math_ops.log(sampled_expected_count)
result = sampled_log_expected_count.eval()

self.assertAllEqual(result, [0.0] * self.NUM_SAMPLED)
@ -85,11 +91,11 @@ class RangeSamplerOpsTest(tf.test.TestCase):

def testAccidentalHits(self):
with self.test_session() as sess:
true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
dtype=tf.int64)
sampled_candidates, _, _ = tf.nn.all_candidate_sampler(
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
accidental_hits = tf.nn.compute_accidental_hits(
accidental_hits = candidate_sampling_ops.compute_accidental_hits(
true_classes, sampled_candidates, self.NUM_TRUE)
indices, ids, weights = sess.run(accidental_hits)

@ -104,16 +110,12 @@ class RangeSamplerOpsTest(tf.test.TestCase):

def draw(seed):
with self.test_session():
true_classes = tf.constant([[1, 2], [0, 4], [3, 3]],
dtype=tf.int64)
sampled, _, _ = tf.nn.log_uniform_candidate_sampler(
true_classes,
self.NUM_TRUE,
self.NUM_SAMPLED,
True,
5,
seed=seed)
true_classes = constant_op.constant(
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled, _, _ = candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True, 5, seed=seed)
return sampled.eval()

# Non-zero seed. Repeatable.
for seed in [1, 12, 123, 1234]:
self.assertAllEqual(draw(seed), draw(seed))
@ -128,4 +130,4 @@ class RangeSamplerOpsTest(tf.test.TestCase):


if __name__ == "__main__":
tf.test.main()
test.main()

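The zero assertions in testTrueLogExpectedCount and testSampledLogExpectedCount follow from all_candidate_sampler drawing every class in the range exactly once, so each expected count is 1 and its log is 0. The arithmetic, spelled out as a sketch:

import numpy as np

num_sampled = 6
expected_count = np.ones(num_sampled)  # every class sampled exactly once
assert np.allclose(np.log(expected_count), np.zeros(num_sampled))
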
@ -12,41 +12,49 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.ops.tf.cast."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class CastOpTest(tf.test.TestCase):
class CastOpTest(test.TestCase):

def _toDataType(self, dtype):
"""Returns TensorFlow data type for numpy type."""
if dtype == np.float32:
return tf.float32
return dtypes.float32
elif dtype == np.float64:
return tf.float64
return dtypes.float64
elif dtype == np.int32:
return tf.int32
return dtypes.int32
elif dtype == np.int64:
return tf.int64
return dtypes.int64
elif dtype == np.bool:
return tf.bool
return dtypes.bool
elif dtype == np.complex64:
return tf.complex64
return dtypes.complex64
elif dtype == np.complex128:
return tf.complex128
return dtypes.complex128
else:
return None

def _cast(self, x, dtype, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
val = tf.constant(x, self._toDataType(np.array([x]).dtype))
return tf.cast(val, self._toDataType(dtype), name="cast").eval()
val = constant_op.constant(x, self._toDataType(np.array([x]).dtype))
return math_ops.cast(val, self._toDataType(dtype), name="cast").eval()

def _test(self, x, dtype, use_gpu=False):
"""Tests cast(x) to dtype behaves the same as numpy.astype."""
@ -57,11 +65,14 @@ class CastOpTest(tf.test.TestCase):
def _testTypes(self, x, use_gpu=False):
"""Tests cast(x) to different tf."""
if use_gpu:
type_list = [np.float32, np.float64, np.int64,
np.complex64, np.complex128]
type_list = [
np.float32, np.float64, np.int64, np.complex64, np.complex128
]
else:
type_list = [np.float32, np.float64, np.int32,
np.int64, np.complex64, np.complex128]
type_list = [
np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]
for from_type in type_list:
for to_type in type_list:
self._test(x.astype(from_type), to_type, use_gpu)
@ -84,17 +95,20 @@ class CastOpTest(tf.test.TestCase):
def testSmallValues(self):
f4 = np.finfo(np.float32)
f8 = np.finfo(np.float64)
self._testAll(np.array([0, -1, 1, -f4.resolution, f4.resolution,
f8.resolution, -f8.resolution]))
self._testAll(
np.array([
0, -1, 1, -f4.resolution, f4.resolution, f8.resolution,
-f8.resolution
]))

def testBfloat16(self):
a = np.random.uniform(-100, 100, 100).astype(np.float32)
with self.test_session(use_gpu=False):
b = tf.cast(tf.cast(a, tf.bfloat16), tf.float32)
self.assertAllClose(a, b.eval(), rtol=1/128.)
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)
with self.test_session(use_gpu=True):
b = tf.cast(tf.cast(a, tf.bfloat16), tf.float32)
self.assertAllClose(a, b.eval(), rtol=1/128.)
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)

def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
@ -104,8 +118,9 @@ class CastOpTest(tf.test.TestCase):
# integer values in somewhat unexpected ways. And they behave
# differently on CPU and GPU.
def _compare(self, x, dst_dtype, expected, use_gpu=False):
np.testing.assert_equal(self._cast(x, dst_dtype, use_gpu=use_gpu),
dst_dtype(expected))
np.testing.assert_equal(
self._cast(
x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))

def testIntToFloatBoundary(self):
i4 = np.iinfo(np.int32)
@ -148,40 +163,39 @@ class CastOpTest(tf.test.TestCase):
def _OpError(self, x, dtype, err):
with self.test_session():
with self.assertRaisesOpError(err):
tf.cast(x, dtype).eval()
math_ops.cast(x, dtype).eval()

def testNotImplemented(self):
self._OpError(np.arange(0, 10), tf.string,
"Cast.*int64.*string.*")
self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int64.*string.*")

def testCastToTypeOfVariable(self):
with self.test_session() as sess:
x = tf.Variable(5, dtype=tf.float32)
y = tf.Variable(True, dtype=tf.bool)
cast = tf.cast(y, x.dtype)
tf.global_variables_initializer().run()
x = variables.Variable(5, dtype=dtypes.float32)
y = variables.Variable(True, dtype=dtypes.bool)
cast = math_ops.cast(y, x.dtype)
variables.global_variables_initializer().run()
self.assertEqual(1.0, sess.run(cast))

def testGradients(self):
t = [tf.float32, tf.float64, tf.complex64, tf.complex128]
t = [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
for src_t in t:
for dst_t in t:
with self.test_session():
x = tf.constant(1.0, src_t)
z = tf.identity(x)
y = tf.cast(z, dst_t)
err = tf.test.compute_gradient_error(x, [], y, [])
x = constant_op.constant(1.0, src_t)
z = array_ops.identity(x)
y = math_ops.cast(z, dst_t)
err = gradient_checker.compute_gradient_error(x, [], y, [])
self.assertLess(err, 1e-3)


class SparseTensorCastTest(tf.test.TestCase):
class SparseTensorCastTest(test.TestCase):

def testCast(self):
indices = tf.constant([[0], [1], [2]], tf.int64)
values = tf.constant(np.array([1, 2, 3], np.int64))
shape = tf.constant([3], tf.int64)
st = tf.SparseTensor(indices, values, shape)
st_cast = tf.cast(st, tf.float32)
indices = constant_op.constant([[0], [1], [2]], dtypes.int64)
values = constant_op.constant(np.array([1, 2, 3], np.int64))
shape = constant_op.constant([3], dtypes.int64)
st = sparse_tensor.SparseTensor(indices, values, shape)
st_cast = math_ops.cast(st, dtypes.float32)
with self.test_session():
self.assertAllEqual(st_cast.indices.eval(), [[0], [1], [2]])
self.assertAllEqual(st_cast.values.eval(),
@ -189,18 +203,18 @@ class SparseTensorCastTest(tf.test.TestCase):
self.assertAllEqual(st_cast.dense_shape.eval(), [3])


class SaturateCastTest(tf.test.TestCase):
class SaturateCastTest(test.TestCase):

def testSaturate(self):
in_types = tf.float32,
out_types = tf.int8, tf.uint8, tf.int16, tf.float32
in_types = dtypes.float32,
out_types = dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.float32
with self.test_session() as sess:
for in_type in in_types:
for out_type in out_types:
lo, hi = in_type.min, in_type.max
x = tf.constant([lo, lo + 1, lo // 2, hi // 2, hi - 1, hi],
dtype=in_type)
y = tf.saturate_cast(x, dtype=out_type)
x = constant_op.constant(
[lo, lo + 1, lo // 2, hi // 2, hi - 1, hi], dtype=in_type)
y = math_ops.saturate_cast(x, dtype=out_type)
self.assertEqual(y.dtype, out_type)
x, y = sess.run([x, y])
correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
@ -208,4 +222,4 @@ class SaturateCastTest(tf.test.TestCase):


if __name__ == "__main__":
tf.test.main()
test.main()

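testSaturate encodes saturate_cast's contract directly in its oracle line: clamp to the destination type's range, then cast. The same computation in plain numpy for a float32 to int8 cast (input values are illustrative):

import numpy as np

x = np.array([-1e9, -1.0, 0.0, 200.0, 1e9], dtype=np.float32)
lo, hi = np.iinfo(np.int8).min, np.iinfo(np.int8).max  # -128, 127
saturated = np.clip(x, lo, hi).astype(np.int8)         # [-128, -1, 0, 127, 127]
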
File diff suppressed because it is too large
@ -13,16 +13,25 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Cholesky."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


class CholeskyOpTest(tf.test.TestCase):
class CholeskyOpTest(test.TestCase):

def _verifyCholeskyBase(self, sess, x, chol, verification):
chol_np, verification_np = sess.run([chol, verification])
@ -40,8 +49,8 @@ class CholeskyOpTest(tf.test.TestCase):
def _verifyCholesky(self, x):
# Verify that LL^T == x.
with self.test_session() as sess:
chol = tf.cholesky(x)
verification = tf.matmul(chol, chol, adjoint_b=True)
chol = linalg_ops.cholesky(x)
verification = math_ops.matmul(chol, chol, adjoint_b=True)
self._verifyCholeskyBase(sess, x, chol, verification)

def testBasic(self):
@ -62,18 +71,18 @@ class CholeskyOpTest(tf.test.TestCase):

def testNonSquareMatrix(self):
with self.assertRaises(ValueError):
tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
linalg_ops.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
with self.assertRaises(ValueError):
tf.cholesky(
linalg_ops.cholesky(
np.array([[[1., 2., 3.], [3., 4., 5.]], [[1., 2., 3.], [3., 4., 5.]]
]))

def testWrongDimensions(self):
tensor3 = tf.constant([1., 2.])
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
tf.cholesky(tensor3)
linalg_ops.cholesky(tensor3)
with self.assertRaises(ValueError):
tf.cholesky(tensor3)
linalg_ops.cholesky(tensor3)

def testNotInvertible(self):
# The input should be invertible.
@ -89,7 +98,7 @@ class CholeskyOpTest(tf.test.TestCase):
self._verifyCholesky(np.empty([2, 0, 0]))


class CholeskyGradTest(tf.test.TestCase):
class CholeskyGradTest(test.TestCase):
_backprop_block_size = 32

def getShapes(self, shapeList):
@ -104,50 +113,59 @@ class CholeskyGradTest(tf.test.TestCase):
np.random.seed(0)
shapes = self.getShapes([self._backprop_block_size + 1])
self.runFiniteDifferences(
shapes, dtypes=(tf.float32, tf.float64), scalarTest=True)
shapes,
dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalarTest=True)

def testTwoBlockMatrixFloat(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(shapes, dtypes=(tf.float32,), scalarTest=True)
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float32,), scalarTest=True)

def testTwoBlockMatrixDouble(self):
np.random.seed(0)
shapes = self.getShapes([2 * self._backprop_block_size + 1])
self.runFiniteDifferences(shapes, dtypes=(tf.float64,), scalarTest=True)
self.runFiniteDifferences(
shapes, dtypes=(dtypes_lib.float64,), scalarTest=True)

def runFiniteDifferences(self,
shapes,
dtypes=(tf.float32, tf.float64),
dtypes=(dtypes_lib.float32, dtypes_lib.float64),
scalarTest=False):
with self.test_session(use_gpu=False):
for shape in shapes:
for batch in False, True:
for dtype in dtypes:
if not scalarTest:
x = tf.constant(np.random.randn(shape[0], shape[1]), dtype)
tensor = tf.matmul(x, tf.transpose(x)) / shape[0]
x = constant_op.constant(
np.random.randn(shape[0], shape[1]), dtype)
tensor = math_ops.matmul(x, array_ops.transpose(x)) / shape[0]
else:
# This is designed to be a faster test for larger matrices.
x = tf.constant(np.random.randn(), dtype)
R = tf.constant(np.random.randn(shape[0], shape[1]), dtype)
e = tf.mul(R, x)
tensor = tf.matmul(e, tf.transpose(e)) / shape[0]
x = constant_op.constant(np.random.randn(), dtype)
R = constant_op.constant(
np.random.randn(shape[0], shape[1]), dtype)
e = math_ops.mul(R, x)
tensor = math_ops.matmul(e, array_ops.transpose(e)) / shape[0]

# Inner-most matrices in tensor are positive definite.
if batch:
tensor = tf.tile(tf.expand_dims(tensor, 0), [4, 1, 1])
y = tf.cholesky(tensor)
tensor = array_ops.tile(
array_ops.expand_dims(tensor, 0), [4, 1, 1])
y = linalg_ops.cholesky(tensor)
if scalarTest:
y = tf.reduce_mean(y)
error = tf.test.compute_gradient_error(x, x._shape_as_list(), y,
y._shape_as_list())
tf.logging.info("error = %f", error)
if dtype == tf.float64:
y = math_ops.reduce_mean(y)
error = gradient_checker.compute_gradient_error(x,
x._shape_as_list(),
y,
y._shape_as_list())
tf_logging.info("error = %f", error)
if dtype == dtypes_lib.float64:
self.assertLess(error, 1e-5)
else:
self.assertLess(error, 3e-3)


if __name__ == "__main__":
tf.test.main()
test.main()

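_verifyCholesky checks the defining identity of the factorization: if L = cholesky(x) for a symmetric positive-definite x, then L L^T reconstructs x. The same invariant in plain numpy (the matrix is an illustrative SPD example):

import numpy as np

x = np.array([[4.0, 2.0], [2.0, 3.0]])  # symmetric positive definite
L = np.linalg.cholesky(x)               # lower triangular factor
np.testing.assert_allclose(L.dot(L.T), x)
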
@ -12,36 +12,37 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.ops.clip_ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.platform import test


class ClipTest(tf.test.TestCase):
class ClipTest(test.TestCase):

# ClipByValue test
def testClipByValue(self):
with self.test_session():
x = tf.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0],
[4.0, 4.4, 4.4]]
x = constant_op.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
clip_value = 4.4
ans = tf.clip_by_value(x, -clip_value, clip_value)
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()

self.assertAllClose(np_ans, tf_ans)

def testClipByValueNonFinite(self):
with self.test_session():
x = tf.constant([float('NaN'), float('Inf'), -float('Inf')])
x = constant_op.constant([float('NaN'), float('Inf'), -float('Inf')])
np_ans = [float('NaN'), 4.0, -4.0]
clip_value = 4.0
ans = tf.clip_by_value(x, -clip_value, clip_value)
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()

self.assertAllClose(np_ans, tf_ans)
@ -50,16 +51,15 @@ class ClipTest(tf.test.TestCase):
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-2.4, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
clip_norm = 4.0
ans = tf.clip_by_norm(x, clip_norm)
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()

clip_tensor = tf.constant(4.0)
ans = tf.clip_by_norm(x, clip_norm)
clip_tensor = constant_op.constant(4.0)
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans_tensor = ans.eval()

self.assertAllClose(np_ans, tf_ans)
@ -68,12 +68,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm)
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()

self.assertAllClose(np_ans, tf_ans)
@ -81,12 +80,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormZero(self):
# No norm clipping when norm = 0
with self.test_session():
x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm)
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()

self.assertAllClose(np_ans, tf_ans)
@ -94,12 +92,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormClippedWithDim0(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[:, 0] = sqrt(3^2 + 4^2) = 5, x[:, 2] = 3
np_ans = [[-2.4, 0.0, 0.0],
[3.2, 0.0, 3.0]]
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
clip_norm = 4.0
ans = tf.clip_by_norm(x, clip_norm, [0])
ans = clip_ops.clip_by_norm(x, clip_norm, [0])
tf_ans = ans.eval()

self.assertAllClose(np_ans, tf_ans)
@ -107,12 +104,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormClippedWithDim1(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0],
[3.2, 0.0, 2.4]]
np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
clip_norm = 4.0
ans = tf.clip_by_norm(x, clip_norm, [1])
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = ans.eval()

self.assertAllClose(np_ans, tf_ans)
@ -120,12 +116,11 @@ class ClipTest(tf.test.TestCase):
def testClipByNormNotClippedWithAxes(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 3.0], shape=[2, 3])
# Norm of x[0, :] = 3, x[1, :] = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0],
[4.0, 0.0, 3.0]]
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm, [1])
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = ans.eval()

self.assertAllClose(np_ans, tf_ans)
@ -134,17 +129,16 @@ class ClipTest(tf.test.TestCase):
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0

# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]

ans, norm = tf.clip_by_global_norm((x0, x1), clip_norm)
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
@ -156,17 +150,16 @@ class ClipTest(tf.test.TestCase):
def testClipByGlobalNormClippedTensor(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = constant_op.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = tf.constant(4.0)
clip_norm = constant_op.constant(4.0)

# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]

ans, norm = tf.clip_by_global_norm((x0, x1), clip_norm)
|
||||
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
|
||||
tf_ans_1 = ans[0].eval()
|
||||
tf_ans_2 = ans[1].eval()
|
||||
tf_norm = norm.eval()
|
||||
@ -178,17 +171,16 @@ class ClipTest(tf.test.TestCase):
|
||||
def testClipByGlobalNormSupportsNone(self):
|
||||
# Norm clipping when clip_norm < 5
|
||||
with self.test_session():
|
||||
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
x1 = tf.constant([1.0, -2.0])
|
||||
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
x1 = constant_op.constant([1.0, -2.0])
|
||||
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
|
||||
clip_norm = 4.0
|
||||
|
||||
# Answers are the original tensors scaled by 4.0/5.0
|
||||
np_ans_0 = [[-1.6, 0.0, 0.0],
|
||||
[3.2, 0.0, 0.0]]
|
||||
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
|
||||
np_ans_1 = [0.8, -1.6]
|
||||
|
||||
ans, norm = tf.clip_by_global_norm((x0, None, x1, None), clip_norm)
|
||||
ans, norm = clip_ops.clip_by_global_norm((x0, None, x1, None), clip_norm)
|
||||
self.assertTrue(ans[1] is None)
|
||||
self.assertTrue(ans[3] is None)
|
||||
tf_ans_1 = ans[0].eval()
|
||||
@ -202,18 +194,17 @@ class ClipTest(tf.test.TestCase):
|
||||
def testClipByGlobalNormWithIndexedSlicesClipped(self):
|
||||
# Norm clipping when clip_norm < 5
|
||||
with self.test_session():
|
||||
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
x1 = tf.IndexedSlices(tf.constant([1.0, -2.0]),
|
||||
tf.constant([3, 4]))
|
||||
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
x1 = ops.IndexedSlices(
|
||||
constant_op.constant([1.0, -2.0]), constant_op.constant([3, 4]))
|
||||
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
|
||||
clip_norm = 4.0
|
||||
|
||||
# Answers are the original tensors scaled by 4.0/5.0
|
||||
np_ans_0 = [[-1.6, 0.0, 0.0],
|
||||
[3.2, 0.0, 0.0]]
|
||||
np_ans_0 = [[-1.6, 0.0, 0.0], [3.2, 0.0, 0.0]]
|
||||
np_ans_1 = [0.8, -1.6]
|
||||
|
||||
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
|
||||
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
|
||||
tf_ans_1 = ans[0].eval()
|
||||
tf_ans_2 = ans[1].values.eval()
|
||||
tf_norm = norm.eval()
|
||||
@ -224,11 +215,11 @@ class ClipTest(tf.test.TestCase):
|
||||
|
||||
def testClipByGlobalNormPreservesDenseShape(self):
|
||||
dense_shape = (1,)
|
||||
slices = tf.IndexedSlices(
|
||||
tf.constant([1.0]),
|
||||
tf.constant([0]),
|
||||
slices = ops.IndexedSlices(
|
||||
constant_op.constant([1.0]),
|
||||
constant_op.constant([0]),
|
||||
dense_shape=dense_shape)
|
||||
ans, _ = tf.clip_by_global_norm([slices], 1.0)
|
||||
ans, _ = clip_ops.clip_by_global_norm([slices], 1.0)
|
||||
modified_slices = ans[0]
|
||||
self.assertEqual(dense_shape, slices.dense_shape)
|
||||
self.assertEqual(dense_shape, modified_slices.dense_shape)
|
||||
@ -236,15 +227,14 @@ class ClipTest(tf.test.TestCase):
|
||||
def testClipByGlobalNormNotClipped(self):
|
||||
# No norm clipping when clip_norm >= 5
|
||||
with self.test_session():
|
||||
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
x1 = tf.constant([1.0, -2.0])
|
||||
x0 = constant_op.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
x1 = constant_op.constant([1.0, -2.0])
|
||||
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
|
||||
np_ans_0 = [[-2.0, 0.0, 0.0],
|
||||
[4.0, 0.0, 0.0]]
|
||||
np_ans_0 = [[-2.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
|
||||
np_ans_1 = [1.0, -2.0]
|
||||
clip_norm = 6.0
|
||||
|
||||
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
|
||||
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
|
||||
tf_ans_1 = ans[0].eval()
|
||||
tf_ans_2 = ans[1].eval()
|
||||
tf_norm = norm.eval()
|
||||
@ -256,15 +246,14 @@ class ClipTest(tf.test.TestCase):
|
||||
def testClipByGlobalNormZero(self):
|
||||
# No norm clipping when norm = 0
|
||||
with self.test_session():
|
||||
x0 = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
|
||||
x1 = tf.constant([0.0, 0.0])
|
||||
x0 = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
|
||||
x1 = constant_op.constant([0.0, 0.0])
|
||||
# Norm = 0, no changes
|
||||
np_ans_0 = [[0.0, 0.0, 0.0],
|
||||
[0.0, 0.0, 0.0]]
|
||||
np_ans_0 = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
|
||||
np_ans_1 = [0.0, 0.0]
|
||||
clip_norm = 6.0
|
||||
|
||||
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
|
||||
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
|
||||
tf_ans_1 = ans[0].eval()
|
||||
tf_ans_2 = ans[1].eval()
|
||||
tf_norm = norm.eval()
|
||||
@ -276,12 +265,11 @@ class ClipTest(tf.test.TestCase):
|
||||
def testClipByAverageNormClipped(self):
|
||||
# Norm clipping when average clip_norm < 0.83333333
|
||||
with self.test_session():
|
||||
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
|
||||
np_ans = [[-2.88, 0.0, 0.0],
|
||||
[3.84, 0.0, 0.0]]
|
||||
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
|
||||
clip_norm = 0.8
|
||||
ans = tf.clip_by_average_norm(x, clip_norm)
|
||||
ans = clip_ops.clip_by_average_norm(x, clip_norm)
|
||||
tf_ans = ans.eval()
|
||||
|
||||
self.assertAllClose(np_ans, tf_ans)
|
||||
@ -289,12 +277,11 @@ class ClipTest(tf.test.TestCase):
|
||||
def testClipByAverageNormClippedTensor(self):
|
||||
# Norm clipping when average clip_norm < 0.83333333
|
||||
with self.test_session():
|
||||
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
|
||||
np_ans = [[-2.88, 0.0, 0.0],
|
||||
[3.84, 0.0, 0.0]]
|
||||
clip_norm = tf.constant(0.8)
|
||||
ans = tf.clip_by_average_norm(x, clip_norm)
|
||||
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
|
||||
clip_norm = constant_op.constant(0.8)
|
||||
ans = clip_ops.clip_by_average_norm(x, clip_norm)
|
||||
tf_ans = ans.eval()
|
||||
|
||||
self.assertAllClose(np_ans, tf_ans)
|
||||
@ -302,12 +289,11 @@ class ClipTest(tf.test.TestCase):
|
||||
def testClipByAverageNormNotClipped(self):
|
||||
# No norm clipping when average clip_norm >= 0.83333333
|
||||
with self.test_session():
|
||||
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
|
||||
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
|
||||
np_ans = [[-3.0, 0.0, 0.0],
|
||||
[4.0, 0.0, 0.0]]
|
||||
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
|
||||
clip_norm = 0.9
|
||||
ans = tf.clip_by_average_norm(x, clip_norm)
|
||||
ans = clip_ops.clip_by_average_norm(x, clip_norm)
|
||||
tf_ans = ans.eval()
|
||||
|
||||
self.assertAllClose(np_ans, tf_ans)
|
||||
@ -315,15 +301,15 @@ class ClipTest(tf.test.TestCase):
|
||||
def testClipByAverageNormZero(self):
|
||||
# No norm clipping when average clip_norm = 0
|
||||
with self.test_session():
|
||||
x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
|
||||
x = constant_op.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
|
||||
# Average norm = 0, no changes
|
||||
np_ans = [[0.0, 0.0, 0.0],
|
||||
[0.0, 0.0, 0.0]]
|
||||
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
|
||||
clip_norm = 0.9
|
||||
ans = tf.clip_by_average_norm(x, clip_norm)
|
||||
ans = clip_ops.clip_by_average_norm(x, clip_norm)
|
||||
tf_ans = ans.eval()
|
||||
|
||||
self.assertAllClose(np_ans, tf_ans)
|
||||
|
||||
if __name__ == "__main__":
|
||||
tf.test.main()
|
||||
|
||||
if __name__ == '__main__':
|
||||
test.main()
|
||||
|
@ -12,36 +12,44 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Functional tests for Concat Op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


def _call_concat(values, axis, use_concat_v2):
if use_concat_v2:
return gen_array_ops._concat_v2(values, axis)
else:
return tf.concat(axis, values)
return array_ops.concat(axis, values)


class ConcatOpTest(tf.test.TestCase):
class ConcatOpTest(test.TestCase):

def testHStack(self):
with self.test_session():
p1 = tf.placeholder(tf.float32, shape=[4, 4])
p2 = tf.placeholder(tf.float32, shape=[4, 4])
c = tf.concat(0, [p1, p2])
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat(0, [p1, p2])
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
}
result = c.eval(feed_dict=params)

self.assertEqual(result.shape, c.get_shape())
@ -50,13 +58,13 @@ class ConcatOpTest(tf.test.TestCase):

def testVStack(self):
with self.test_session():
p1 = tf.placeholder(tf.float32, shape=[4, 4])
p2 = tf.placeholder(tf.float32, shape=[4, 4])
c = tf.concat(1, [p1, p2])
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat(1, [p1, p2])
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
}
result = c.eval(feed_dict=params)

self.assertEqual(result.shape, c.get_shape())
@ -67,9 +75,9 @@ class ConcatOpTest(tf.test.TestCase):
with self.test_session(use_gpu=True):
p1 = np.random.rand(2, 3).astype("i")
p2 = np.random.rand(2, 3).astype("i")
x1 = tf.constant(p1)
x2 = tf.constant(p2)
c = tf.concat(0, [x1, x2])
x1 = constant_op.constant(p1)
x2 = constant_op.constant(p2)
c = array_ops.concat(0, [x1, x2])
result = c.eval()
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
@ -78,10 +86,10 @@ class ConcatOpTest(tf.test.TestCase):
with self.test_session():
p1 = np.random.rand(4, 4).astype("f")
p2 = np.random.rand(4, 4).astype("f")
v1 = tf.Variable(p1)
v2 = tf.Variable(p2)
c = tf.concat(0, [v1, v2])
tf.global_variables_initializer().run()
v1 = variables.Variable(p1)
v2 = variables.Variable(p2)
c = array_ops.concat(0, [v1, v2])
variables.global_variables_initializer().run()
result = c.eval()

self.assertEqual(result.shape, c.get_shape())
@ -96,8 +104,8 @@ class ConcatOpTest(tf.test.TestCase):
# Random dim to concat on
concat_dim = np.random.randint(5)
params = {}
if dtype == tf.bfloat16:
dtype_feed = tf.float32
if dtype == dtypes.bfloat16:
dtype_feed = dtypes.float32
else:
dtype_feed = dtype
with self.test_session(use_gpu=use_gpu):
@ -105,19 +113,19 @@ class ConcatOpTest(tf.test.TestCase):
for i in np.arange(num_tensors):
input_shape = shape
input_shape[concat_dim] = np.random.randint(1, 5)
placeholder = tf.placeholder(dtype_feed, shape=input_shape)
placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
p.append(placeholder)

t = dtype_feed.as_numpy_dtype
params[placeholder] = np.random.rand(*input_shape).astype(t)

if dtype != dtype_feed:
concat_inputs = [tf.cast(p_i, dtype) for p_i in p]
concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
else:
concat_inputs = p
c = tf.concat(concat_dim, concat_inputs)
c = array_ops.concat(concat_dim, concat_inputs)
if dtype != dtype_feed:
c = tf.cast(c, dtype_feed)
c = math_ops.cast(c, dtype_feed)
result = c.eval(feed_dict=params)

self.assertEqual(result.shape, c.get_shape())
@ -137,33 +145,33 @@ class ConcatOpTest(tf.test.TestCase):
self.assertAllClose(result[ind], params[p[i]], 0.01)

def testRandom(self):
self._testRandom(tf.float32)
self._testRandom(tf.float32, use_gpu=True)
self._testRandom(tf.int16)
self._testRandom(tf.int32, use_gpu=True)
self._testRandom(tf.bfloat16)
self._testRandom(tf.bfloat16, use_gpu=True)
self._testRandom(dtypes.float32)
self._testRandom(dtypes.float32, use_gpu=True)
self._testRandom(dtypes.int16)
self._testRandom(dtypes.int32, use_gpu=True)
self._testRandom(dtypes.bfloat16)
self._testRandom(dtypes.bfloat16, use_gpu=True)

def testInvalidConcatDimTypeAndShape(self):
a = tf.Variable(tf.constant(1.0, shape=[1]))
b = tf.Variable(tf.constant(2.0, shape=[1]))
a = variables.Variable(constant_op.constant(1.0, shape=[1]))
b = variables.Variable(constant_op.constant(2.0, shape=[1]))
with self.assertRaises(ValueError):
tf.concat(a, b)
array_ops.concat(a, b)
with self.assertRaises(TypeError):
tf.concat(4.2, 1)
array_ops.concat(4.2, 1)
with self.assertRaises(ValueError):
tf.concat(a, 1)
array_ops.concat(a, 1)
with self.assertRaises(TypeError):
tf.concat(a, [a, b])
array_ops.concat(a, [a, b])
with self.assertRaises(ValueError):
tf.concat([3], [a, b])
array_ops.concat([3], [a, b])
with self.assertRaises(ValueError):
tf.concat(0, [])
array_ops.concat(0, [])
# An integer tensor for shape dim should throw no error.
tf.concat(tf.constant(0, shape=[]), 1)
array_ops.concat(constant_op.constant(0, shape=[]), 1)
# A non-scalar tensor for shape should throw ValueError.
with self.assertRaises(ValueError):
tf.concat(tf.constant(0, shape=[1]), 1)
array_ops.concat(constant_op.constant(0, shape=[1]), 1)

def _testGradientsSimple(self, use_gpu, use_concat_v2):
with self.test_session(use_gpu=use_gpu):
@ -174,14 +182,16 @@ class ConcatOpTest(tf.test.TestCase):
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
tf.constant([float(y) for y in t.flatten()],
shape=shape, dtype=tf.float32))
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = _call_concat(inp_tensors, 1, use_concat_v2)
output_shape = [10, 9, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
shape=output_shape)
grad = tf.gradients([c], inp_tensors, [grad_tensor])
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = _call_concat(grad, 1, use_concat_v2)
result = concated_grad.eval()
self.assertAllEqual(result, grad_inp)
@ -201,14 +211,16 @@ class ConcatOpTest(tf.test.TestCase):
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
tf.constant([float(y) for y in t.flatten()],
shape=shape, dtype=tf.float32))
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = _call_concat(inp_tensors, 0, use_concat_v2)
output_shape = [9, 10, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
shape=output_shape)
grad = tf.gradients([c], inp_tensors, [grad_tensor])
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = _call_concat(grad, 0, use_concat_v2)
result = concated_grad.eval()

@ -229,14 +241,16 @@ class ConcatOpTest(tf.test.TestCase):
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
tf.constant([float(y) for y in t.flatten()],
shape=shape, dtype=tf.float32))
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = _call_concat(inp_tensors, 2, use_concat_v2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
shape=output_shape)
grad = tf.gradients([c], inp_tensors, [grad_tensor])
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = _call_concat(grad, 2, use_concat_v2)
result = concated_grad.eval()

@ -265,15 +279,17 @@ class ConcatOpTest(tf.test.TestCase):
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
tf.constant([float(y) for y in t.flatten()],
shape=shape, dtype=tf.float32))
constant_op.constant(
[float(y) for y in t.flatten()],
shape=shape,
dtype=dtypes.float32))
c = _call_concat(inp_tensors, concat_dim, use_concat_v2)
output_shape = input_shape
output_shape[concat_dim] = concat_dim_sizes.sum()
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
shape=output_shape)
grad = tf.gradients([c], inp_tensors, [grad_tensor])
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = _call_concat(grad, concat_dim, use_concat_v2)
result = concated_grad.eval()

@ -289,41 +305,57 @@ class ConcatOpTest(tf.test.TestCase):
def testShapeError(self):
# Rank doesn't match.
with self.assertRaises(ValueError):
tf.concat(1, [tf.constant(10.0, shape=[4, 4, 4, 4]),
tf.constant(20.0, shape=[4, 4, 4])])
array_ops.concat(
1, [
constant_op.constant(
10.0, shape=[4, 4, 4, 4]), constant_op.constant(
20.0, shape=[4, 4, 4])
])

# Dimensions don't match in a non-concat dim.
with self.assertRaises(ValueError):
tf.concat(1, [tf.constant(10.0, shape=[1, 2, 1]),
tf.constant(20.0, shape=[3, 2, 1])])
array_ops.concat(
1, [
constant_op.constant(
10.0, shape=[1, 2, 1]), constant_op.constant(
20.0, shape=[3, 2, 1])
])

# concat_dim out of range.
with self.assertRaises(ValueError):
tf.concat(3, [tf.constant(10.0, shape=[4, 4, 4]),
tf.constant(20.0, shape=[4, 4, 4])])
array_ops.concat(
3, [
constant_op.constant(
10.0, shape=[4, 4, 4]), constant_op.constant(
20.0, shape=[4, 4, 4])
])

# concat_dim out of range
with self.assertRaises(ValueError):
tf.concat(-4, [tf.constant(10.0, shape=[4, 4, 4]),
tf.constant(20.0, shape=[4, 4, 4])])
array_ops.concat(
-4, [
constant_op.constant(
10.0, shape=[4, 4, 4]), constant_op.constant(
20.0, shape=[4, 4, 4])
])

def testShapeWithUnknownConcatDim(self):
p1 = tf.placeholder(tf.float32)
c1 = tf.constant(10.0, shape=[4, 4, 4, 4])
p2 = tf.placeholder(tf.float32)
c2 = tf.constant(20.0, shape=[4, 4, 4, 4])
dim = tf.placeholder(tf.int32)
concat = tf.concat(dim, [p1, c1, p2, c2])
p1 = array_ops.placeholder(dtypes.float32)
c1 = constant_op.constant(10.0, shape=[4, 4, 4, 4])
p2 = array_ops.placeholder(dtypes.float32)
c2 = constant_op.constant(20.0, shape=[4, 4, 4, 4])
dim = array_ops.placeholder(dtypes.int32)
concat = array_ops.concat(dim, [p1, c1, p2, c2])
self.assertEqual(4, concat.get_shape().ndims)

# All dimensions unknown.
concat2 = tf.concat(dim, [p1, p2])
concat2 = array_ops.concat(dim, [p1, p2])
self.assertEqual(None, concat2.get_shape())

# Rank doesn't match.
c3 = tf.constant(30.0, shape=[4, 4, 4])
c3 = constant_op.constant(30.0, shape=[4, 4, 4])
with self.assertRaises(ValueError):
tf.concat(dim, [p1, c1, p2, c3])
array_ops.concat(dim, [p1, c1, p2, c3])

def testZeroSize(self):
# Verify that concat doesn't crash and burn for zero size inputs
@ -339,87 +371,102 @@ class ConcatOpTest(tf.test.TestCase):
x1 = np.random.randn(*(shape0 + (n1,) + shape1))
correct = np.concatenate([x0, x1], axis=axis)
# TODO(irving): Make tf.concat handle map, then drop list().
xs = list(map(tf.constant, [x0, x1]))
c = tf.concat(axis, xs)
xs = list(map(constant_op.constant, [x0, x1]))
c = array_ops.concat(axis, xs)
self.assertAllEqual(c.eval(), correct)
# Check gradients
dc = np.random.randn(*c.get_shape().as_list())
dxs = sess.run(tf.gradients(c, xs, dc))
dxs = sess.run(gradients_impl.gradients(c, xs, dc))
self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))

def testTensorConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [44, 7, 3]
x_vals = [np.random.random_sample(x_shape).astype(
np.float64) for x_shape in x_shapes]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [tf.constant(x_val) for x_val in x_vals]
output = tf.concat(0, xs)
err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(0, xs)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)

def testTensorConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [20, 11, 3]
x_vals = [np.random.random_sample(x_shape).astype(
np.float64) for x_shape in x_shapes]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [tf.constant(x_val) for x_val in x_vals]
output = tf.concat(1, xs)
err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(1, xs)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)

def testIndexedSlicesConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [4, 7, 3]
x_vals = [np.random.random_sample(x_shape).astype(
np.float64) for x_shape in x_shapes]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [tf.constant(x_val) for x_val in x_vals]
x_concat = tf.concat(0, xs)
output = tf.gather(x_concat, [1, 2, 0, 5])
err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(0, xs)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)

def testIndexedSlicesConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [4, 11, 3]
x_vals = [np.random.random_sample(x_shape).astype(
np.float64) for x_shape in x_shapes]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [tf.constant(x_val) for x_val in x_vals]
x_concat = tf.concat(1, xs)
output = tf.gather(x_concat, [1, 2, 0, 5])
err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(1, xs)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)

def testIndexedSlicesConcatDim2Grad(self):
x_shapes = [[20, 7, 3], [20, 7, 1], [20, 7, 2]]
output_shape = [4, 7, 6]
x_vals = [np.random.random_sample(x_shape).astype(
np.float64) for x_shape in x_shapes]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [tf.constant(x_val) for x_val in x_vals]
x_concat = tf.concat(2, xs)
output = tf.gather(x_concat, [1, 2, 0, 5])
err = tf.test.compute_gradient_error(xs, x_shapes, output, output_shape)
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(2, xs)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)

def testConcatTuple(self):
c1 = np.random.rand(4, 4)
c2 = np.random.rand(4, 4)
with self.test_session():
concat_list_t = tf.concat(0, [c1, c2])
concat_tuple_t = tf.concat(0, (c1, c2))
concat_list_t = array_ops.concat(0, [c1, c2])
concat_tuple_t = array_ops.concat(0, (c1, c2))
self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())

def testConcatNoScalars(self):
with self.test_session():
scalar = tf.constant(7)
dim = tf.placeholder(tf.int32)
scalar = constant_op.constant(7)
dim = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(
ValueError, r"Can't concatenate scalars \(use tf\.pack instead\)"):
tf.concat(dim, [scalar, scalar, scalar])
array_ops.concat(dim, [scalar, scalar, scalar])

# important as gpu implementation could fail if
# shared memory is not large for all the inputs
@ -429,19 +476,19 @@ class ConcatOpTest(tf.test.TestCase):
params = {}
p = []
shape = np.array([7, 13])
if tf.test.is_gpu_available():
if test.is_gpu_available():
num_tensors = 10000
else:
num_tensors = 1000
for i in np.arange(num_tensors):
input_shape = shape
placeholder = tf.placeholder(tf.float32, shape=input_shape)
placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
p.append(placeholder)

params[placeholder] = np.random.rand(*input_shape).astype(np.float32)

concat_inputs = p
c = tf.concat(concat_dim, concat_inputs)
c = array_ops.concat(concat_dim, concat_inputs)
result = c.eval(feed_dict=params)

self.assertEqual(result.shape, c.get_shape())
@ -462,19 +509,16 @@ class ConcatOpTest(tf.test.TestCase):
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]

c = tf.concat(-2, [t1, t2])
c = array_ops.concat(-2, [t1, t2])
output = c.eval()
self.assertEqual([4, 3], c.get_shape().as_list())
self.assertAllEqual(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
output)
self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
output)

c = tf.concat(-1, [t1, t2])
c = array_ops.concat(-1, [t1, t2])
self.assertEqual([2, 6], c.get_shape().as_list())
output = c.eval()
self.assertAllEqual(
[[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]],
output)
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)

def testConcatV2Empty(self):
with self.test_session(use_gpu=True):
@ -498,72 +542,70 @@ class ConcatOpTest(tf.test.TestCase):
c = gen_array_ops._concat_v2([t1, t2], -2)
self.assertEqual([4, 3], c.get_shape().as_list())
output = c.eval()
self.assertAllEqual(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
output)
self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
output)

c = gen_array_ops._concat_v2([t1, t2], -1)
self.assertEqual([2, 6], c.get_shape().as_list())
output = c.eval()
self.assertAllEqual(
[[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]],
output)
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)


class ConcatOffsetTest(tf.test.TestCase):
class ConcatOffsetTest(test.TestCase):

def testBasic(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
cdim = tf.constant(1, tf.int32)
s0 = tf.constant([2, 3, 5], tf.int32)
s1 = tf.constant([2, 7, 5], tf.int32)
s2 = tf.constant([2, 20, 5], tf.int32)
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
ans = sess.run(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])

def testNotVector(self):
with self.test_session() as sess:
cdim = tf.constant(1, tf.int32)
s0 = tf.constant([[2, 3, 5]], tf.int32)
s1 = tf.constant([[2, 7, 5]], tf.int32)
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([[2, 3, 5]], dtypes.int32)
s1 = constant_op.constant([[2, 7, 5]], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should be a vector"):
sess.run(off)

def testConcatDimOutOfRange(self):
with self.test_session() as sess:
cdim = tf.constant(4, tf.int32)
s0 = tf.constant([2, 3, 5], tf.int32)
s1 = tf.constant([2, 7, 5], tf.int32)
cdim = constant_op.constant(4, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Concat dim is out of range: 4 vs. 3"):
sess.run(off)

def testDimMismatch(self):
with self.test_session() as sess:
cdim = tf.constant(1, tf.int32)
s0 = tf.constant([2, 3, 5], tf.int32)
s1 = tf.constant([2, 7, 5, 10], tf.int32)
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5, 10], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should contain 3 elem"):
sess.run(off)

def testSizeMismatch(self):
with self.test_session() as sess:
cdim = tf.constant(1, tf.int32)
s0 = tf.constant([2, 3, 5], tf.int32)
s1 = tf.constant([2, 7, 10], tf.int32)
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 10], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
errors_impl.InvalidArgumentError,
r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
r"and doesn't match input 0 with shape \[2 3 5\]."):
sess.run(off)


if __name__ == "__main__":
tf.test.main()
test.main()

@ -20,17 +20,28 @@ from __future__ import print_function
import time

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test

# from functools import reduce


class ConditionalAccumulatorTest(tf.test.TestCase):
class ConditionalAccumulatorTest(test.TestCase):

def testConstructor(self):
with tf.Graph().as_default():
q = tf.ConditionalAccumulator(tf.float32, name="Q")
self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
with ops.Graph().as_default():
q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
@ -40,10 +51,12 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
""", q.accumulator_ref.op.node_def)

def testConstructorWithShape(self):
with tf.Graph().as_default():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1, 5, 2, 8]))
self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
with ops.Graph().as_default():
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32,
name="Q",
shape=tensor_shape.TensorShape([1, 5, 2, 8]))
self.assertTrue(isinstance(q.accumulator_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
@ -58,30 +71,31 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorSizeEmpty(self):
with self.test_session():
q = tf.ConditionalAccumulator(tf.float32, name="Q")
q = data_flow_ops.ConditionalAccumulator(dtypes_lib.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)

def testAccumulatorSetGlobalStep(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()

def testAccumulatorApplyGradFloat32(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
accum_op.run()

def testDtypes(self):
with self.test_session() as sess:
dtypes = [tf.float16, tf.float32, tf.float64]
dtypes = [dtypes_lib.float16, dtypes_lib.float32, dtypes_lib.float64]

for i in range(len(dtypes)):
dtype = dtypes[i]
q = tf.ConditionalAccumulator(dtype, shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtype, shape=tensor_shape.TensorShape([1]))

elems = np.arange(10).astype(dtype.as_numpy_dtype)
for e in elems:
@ -93,14 +107,14 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorMultipleAccumulators(self):
with self.test_session():
q_f32_0 = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q_f32_1 = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q_f16_0 = tf.ConditionalAccumulator(
tf.float16, name="Q", shape=tf.TensorShape([1]))
q_f16_1 = tf.ConditionalAccumulator(
tf.float16, name="Q", shape=tf.TensorShape([1]))
q_f32_0 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
q_f32_1 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
q_f16_0 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))
q_f16_1 = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float16, name="Q", shape=tensor_shape.TensorShape([1]))

accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
for i in range(len(accums)):
@ -112,7 +126,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorApplyAndTakeGradWithShape(self):
with self.test_session():
q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=(3, 2))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(3, 2))
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(x, y)]
@ -131,7 +146,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
self.assertTrue(is_all_equal)

def testAccumulatorApplyGradWithWrongShape(self):
q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=(3, 2))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=(3, 2))

with self.assertRaises(ValueError):
q.apply_grad([[1.0, 2.0], [3.0, 4.0]])
@ -141,9 +157,10 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorDynamicShape(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=None)
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=None)

x = tf.placeholder(tf.float32)
x = array_ops.placeholder(dtypes_lib.float32)

accum_op = q.apply_grad(x)

@ -165,25 +182,26 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorWrongDynamicShape(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=None)
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=None)

x = tf.placeholder(tf.float32)
x = array_ops.placeholder(dtypes_lib.float32)

accum_op = q.apply_grad(x)

# First successful apply_grad determines shape
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})

with self.assertRaises(tf.errors.InvalidArgumentError):
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]})

with self.assertRaises(tf.errors.InvalidArgumentError):
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0], [2.0], [3.0]]})

def testAccumulatorSizeAfterApplyGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
@ -193,8 +211,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorSizeAfterApplyGradAndTakeGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
extract_t = q.take_grad(2)

@ -221,8 +239,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorTakeGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)

@ -236,7 +254,7 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
self.assertEqual(elems_ave, val)

accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(tf.constant(1))
takeg_t = q.take_grad(constant_op.constant(1))

for accum_op in accum_ops:
accum_op.run()
@ -246,8 +264,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorInvalidTakeGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,)) for x in elems]

@ -256,13 +274,13 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
for accum_op in accum_ops:
accum_op.run()

with self.assertRaises(tf.errors.InvalidArgumentError):
with self.assertRaises(errors_impl.InvalidArgumentError):
takeg_t.eval()

def testAccumulatorRepeatedTakeGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))

elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)
@ -288,24 +306,24 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorIncrementGlobalStep(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))

global_step = tf.Variable(0, name="global_step")
new_global_step = tf.add(global_step, 1)
inc_global_step = tf.assign(global_step, new_global_step)
global_step = variables.Variable(0, name="global_step")
new_global_step = math_ops.add(global_step, 1)
inc_global_step = state_ops.assign(global_step, new_global_step)

set_global_step_op = q.set_global_step(new_global_step)

tf.global_variables_initializer().run()
variables.global_variables_initializer().run()
for _ in range(3):
set_global_step_op.run()
inc_global_step.eval()

def testAccumulatorSetGlobalStepPreventsAccumulation(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))

local_steps = range(1000, 1005)
accum_ops = [q.apply_grad((0.0 + x,), local_step=x) for x in local_steps]
@ -325,8 +343,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testParallelApplyGrad(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
@ -334,8 +352,10 @@ class ConditionalAccumulatorTest(tf.test.TestCase):
def apply_grad(accum_op):
sess.run(accum_op)

threads = [self.checkedThread(
target=apply_grad, args=(o,)) for o in accum_ops]
threads = [
self.checkedThread(
target=apply_grad, args=(o,)) for o in accum_ops
]

for thread in threads:
thread.start()
@ -348,8 +368,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testParallelTakeGrad(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
elems = [e for e in range(10)]
accum_ops = [q.apply_grad((np.float32(e),), local_step=e) for e in elems]
takeg_t = q.take_grad(1)
@ -380,8 +400,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorApplyAndBlockingTake(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))

elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
@ -413,8 +433,8 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

def testAccumulatorCancel(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q = data_flow_ops.ConditionalAccumulator(
dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([1]))
takeg_t = q.take_grad(1)

takeg_thread = self.checkedThread(
@ -428,5 +448,6 @@ class ConditionalAccumulatorTest(tf.test.TestCase):

takeg_thread.join()


if __name__ == "__main__":
tf.test.main()
test.main()

@ -13,22 +13,28 @@
|
||||
# limitations under the License.
|
||||
# ==============================================================================
|
||||
"""Tests for confusion_matrix_ops."""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
|
||||
from tensorflow.python.framework import constant_op
|
||||
from tensorflow.python.framework import dtypes
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import confusion_matrix
|
||||
from tensorflow.python.ops import math_ops
|
||||
from tensorflow.python.ops import random_ops
|
||||
from tensorflow.python.platform import test
|
||||
|
||||
|
||||
class ConfusionMatrixTest(tf.test.TestCase):
|
||||
class ConfusionMatrixTest(test.TestCase):
|
||||
|
||||
def _testConfMatrix(self, predictions, labels, truth, weights=None):
|
||||
with self.test_session():
|
||||
dtype = predictions.dtype
|
||||
ans = tf.confusion_matrix(
|
||||
ans = confusion_matrix.confusion_matrix(
|
||||
labels, predictions, dtype=dtype, weights=weights)
|
||||
tf_ans = ans.eval()
|
||||
self.assertAllClose(tf_ans, truth, atol=1e-10)
|
||||
@ -56,27 +62,30 @@ class ConfusionMatrixTest(tf.test.TestCase):
|
||||
|
||||
def _testConfMatrixOnTensors(self, tf_dtype, np_dtype):
|
||||
with self.test_session() as sess:
|
||||
m_neg = tf.placeholder(dtype=tf.float32)
|
||||
m_pos = tf.placeholder(dtype=tf.float32)
|
||||
s = tf.placeholder(dtype=tf.float32)
|
||||
m_neg = array_ops.placeholder(dtype=dtypes.float32)
|
||||
m_pos = array_ops.placeholder(dtype=dtypes.float32)
s = array_ops.placeholder(dtype=dtypes.float32)

neg = tf.random_normal([20], mean=m_neg, stddev=s, dtype=tf.float32)
pos = tf.random_normal([20], mean=m_pos, stddev=s, dtype=tf.float32)
neg = random_ops.random_normal(
[20], mean=m_neg, stddev=s, dtype=dtypes.float32)
pos = random_ops.random_normal(
[20], mean=m_pos, stddev=s, dtype=dtypes.float32)

data = tf.concat_v2([neg, pos], 0)
data = tf.cast(tf.round(data), tf_dtype)
data = tf.minimum(tf.maximum(data, 0), 1)
lab = tf.concat_v2(
[tf.zeros(
[20], dtype=tf_dtype), tf.ones(
[20], dtype=tf_dtype)], 0)
data = array_ops.concat_v2([neg, pos], 0)
data = math_ops.cast(math_ops.round(data), tf_dtype)
data = math_ops.minimum(math_ops.maximum(data, 0), 1)
lab = array_ops.concat_v2(
[
array_ops.zeros(
[20], dtype=tf_dtype), array_ops.ones(
[20], dtype=tf_dtype)
],
0)

cm = tf.confusion_matrix(
cm = confusion_matrix.confusion_matrix(
lab, data, dtype=tf_dtype, num_classes=2)

d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0,
m_pos: 1.0,
s: 1.0})
d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0, m_pos: 1.0, s: 1.0})

truth = np.zeros([2, 2], dtype=np_dtype)
try:
@@ -90,10 +99,10 @@ class ConfusionMatrixTest(tf.test.TestCase):
self.assertAllClose(cm_out, truth, atol=1e-10)

def _testOnTensors_int32(self):
self._testConfMatrixOnTensors(tf.int32, np.int32)
self._testConfMatrixOnTensors(dtypes.int32, np.int32)

def testOnTensors_int64(self):
self._testConfMatrixOnTensors(tf.int64, np.int64)
self._testConfMatrixOnTensors(dtypes.int64, np.int64)

def _testDifferentLabelsInPredictionAndTarget(self, dtype):
predictions = np.asarray([1, 2, 3], dtype=dtype)
@@ -142,7 +151,7 @@ class ConfusionMatrixTest(tf.test.TestCase):
def testWeighted(self):
predictions = np.arange(5, dtype=np.int32)
labels = np.arange(5, dtype=np.int32)
weights = tf.constant(np.arange(5, dtype=np.int32))
weights = constant_op.constant(np.arange(5, dtype=np.int32))

truth = np.asarray(
[[0, 0, 0, 0, 0],
@@ -159,27 +168,27 @@ class ConfusionMatrixTest(tf.test.TestCase):
predictions = np.asarray([[1, 2, 3]])
labels = np.asarray([1, 2, 3])
self.assertRaisesRegexp(ValueError, "an not squeeze dim",
tf.confusion_matrix,
predictions, labels)
confusion_matrix.confusion_matrix, predictions,
labels)

predictions = np.asarray([1, 2, 3])
labels = np.asarray([[1, 2, 3]])
self.assertRaisesRegexp(ValueError, "an not squeeze dim",
tf.confusion_matrix,
predictions, labels)
confusion_matrix.confusion_matrix, predictions,
labels)

def testInputDifferentSize(self):
predictions = np.asarray([1, 2, 3])
labels = np.asarray([1, 2])
self.assertRaisesRegexp(ValueError, "must be equal",
tf.confusion_matrix,
predictions, labels)
confusion_matrix.confusion_matrix, predictions,
labels)

def testOutputIsInt32(self):
predictions = np.arange(2)
labels = np.arange(2)
with self.test_session():
cm = tf.confusion_matrix(
cm = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtypes.int32)
tf_cm = cm.eval()
self.assertEqual(tf_cm.dtype, np.int32)
@@ -188,11 +197,11 @@ class ConfusionMatrixTest(tf.test.TestCase):
predictions = np.arange(2)
labels = np.arange(2)
with self.test_session():
cm = tf.confusion_matrix(
cm = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtypes.int64)
tf_cm = cm.eval()
self.assertEqual(tf_cm.dtype, np.int64)


if __name__ == "__main__":
tf.test.main()
test.main()
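The edit pattern above is the whole point of the commit: the test stops reaching ops through the top-level "hourglass" `tf` namespace and imports the implementing modules directly. A minimal self-contained sketch of the migrated style, assuming the TF 1.x-era source tree the diff targets (the tiny values here are illustrative, not taken from the diff):

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.platform import test


class MigratedStyleExample(test.TestCase):

  def testTinyConfusionMatrix(self):
    with self.test_session():
      # Rows index labels, columns index predictions.
      cm = confusion_matrix.confusion_matrix(
          [1, 0, 1], [1, 1, 1], num_classes=2, dtype=dtypes.int32)
      self.assertAllEqual(cm.eval(), np.asarray([[0, 1], [0, 2]]))


if __name__ == "__main__":
  test.main()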
@@ -12,24 +12,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for ConstantOp."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat


class ConstantTest(tf.test.TestCase):
class ConstantTest(test.TestCase):

def _testCpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=False):
tf_ans = tf.convert_to_tensor(x).eval()
tf_ans = ops.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
@@ -38,7 +46,7 @@ class ConstantTest(tf.test.TestCase):
def _testGpu(self, x):
np_ans = np.array(x)
with self.test_session(use_gpu=True):
tf_ans = tf.convert_to_tensor(x).eval()
tf_ans = ops.convert_to_tensor(x).eval()
if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
self.assertAllClose(np_ans, tf_ans)
else:
@@ -62,231 +70,238 @@ class ConstantTest(tf.test.TestCase):
def testInt32(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int32))
self._testAll(np.empty((2, 0, 5)).astype(np.int32))

def testInt64(self):
self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
self._testAll(
(100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
self._testAll((100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(
np.int64))
self._testAll(np.empty((2, 0, 5)).astype(np.int64))

def testComplex64(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
np.complex64))
self._testAll(np.complex(
1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(
np.complex64))
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
self._testAll(np.empty((2, 0, 5)).astype(np.complex64))

def testComplex128(self):
self._testAll(
np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(
np.complex128))
self._testAll(np.complex(
1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(
np.complex128))
np.complex(1, 2) *
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(
np.complex(1, 2) *
np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
self._testAll(np.empty((2, 0, 5)).astype(np.complex128))

def testString(self):
self._testCpu(np.array([tf.compat.as_bytes(str(x))
for x in np.arange(-15, 15)]).reshape([2, 3, 5]))
self._testCpu(
np.array([compat.as_bytes(str(x)) for x in np.arange(-15, 15)]).reshape(
[2, 3, 5]))
self._testCpu(np.empty((2, 0, 5)).astype(np.str_))

def testStringWithNulls(self):
with self.test_session():
val = tf.convert_to_tensor(b"\0\0\0\0").eval()
val = ops.convert_to_tensor(b"\0\0\0\0").eval()
self.assertEqual(len(val), 4)
self.assertEqual(val, b"\0\0\0\0")

with self.test_session():
val = tf.convert_to_tensor(b"xx\0xx").eval()
val = ops.convert_to_tensor(b"xx\0xx").eval()
self.assertEqual(len(val), 5)
self.assertAllEqual(val, b"xx\0xx")
nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]

with self.test_session():
val = tf.convert_to_tensor(nested).eval()
val = ops.convert_to_tensor(nested).eval()
# NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
# numpy array, which loses the null terminators.
self.assertEqual(val.tolist(), nested)

def testExplicitShapeNumPy(self):
with tf.Graph().as_default():
c = tf.constant(
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
shape=[2, 3, 5])
self.assertEqual(c.get_shape(), [2, 3, 5])

def testImplicitShapeNumPy(self):
with tf.Graph().as_default():
c = tf.constant(
with ops.Graph().as_default():
c = constant_op.constant(
np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
self.assertEqual(c.get_shape(), [2, 3, 5])

def testExplicitShapeList(self):
with tf.Graph().as_default():
c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
self.assertEqual(c.get_shape(), [7])

def testImplicitShapeList(self):
with tf.Graph().as_default():
c = tf.constant([1, 2, 3, 4, 5, 6, 7])
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
self.assertEqual(c.get_shape(), [7])

def testExplicitShapeNumber(self):
with tf.Graph().as_default():
c = tf.constant(1, shape=[1])
with ops.Graph().as_default():
c = constant_op.constant(1, shape=[1])
self.assertEqual(c.get_shape(), [1])

def testImplicitShapeNumber(self):
with tf.Graph().as_default():
c = tf.constant(1)
with ops.Graph().as_default():
c = constant_op.constant(1)
self.assertEqual(c.get_shape(), [])

def testShapeInconsistent(self):
with tf.Graph().as_default():
c = tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
with ops.Graph().as_default():
c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
self.assertEqual(c.get_shape(), [10])

# pylint: disable=g-long-lambda
def testShapeWrong(self):
with tf.Graph().as_default():
with ops.Graph().as_default():
with self.assertRaisesWithPredicateMatch(
ValueError,
lambda e: ("Too many elements provided. Needed at most 5, "
"but received 7" == str(e))):
tf.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])

# pylint: enable=g-long-lambda

def testTooLargeConstant(self):
with tf.Graph().as_default():
with ops.Graph().as_default():
large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
with self.assertRaisesRegexp(
ValueError,
"Cannot create a tensor proto whose content is larger than 2GB."):
c = tf.constant(large_array)
c = constant_op.constant(large_array)

def testTooLargeGraph(self):
with tf.Graph().as_default() as g:
with ops.Graph().as_default() as g:
large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
c = tf.constant(large_array)
d = tf.constant(large_array)
with self.assertRaisesRegexp(
ValueError, "GraphDef cannot be larger than 2GB."):
c = constant_op.constant(large_array)
d = constant_op.constant(large_array)
with self.assertRaisesRegexp(ValueError,
"GraphDef cannot be larger than 2GB."):
g.as_graph_def()

def testSparseValuesRaiseErrors(self):
with self.assertRaisesRegexp(ValueError,
"setting an array element with a sequence"):
c = tf.constant([[1, 2], [3]], dtype=tf.int32)
c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)

with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = tf.constant([[1, 2], [3]])
c = constant_op.constant([[1, 2], [3]])

with self.assertRaisesRegexp(ValueError, "must be a dense"):
c = tf.constant([[1, 2], [3], [4, 5]])
c = constant_op.constant([[1, 2], [3], [4, 5]])

class AsTensorTest(tf.test.TestCase):
class AsTensorTest(test.TestCase):

def testAsTensorForTensorInput(self):
with tf.Graph().as_default():
t = tf.constant(10.0)
x = tf.convert_to_tensor(t)
with ops.Graph().as_default():
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)

def testAsTensorForNonTensorInput(self):
with tf.Graph().as_default():
x = tf.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, tf.Tensor))
with ops.Graph().as_default():
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.Tensor))

def testAsTensorForShapeInput(self):
with self.test_session():
x = tf.convert_to_tensor(tf.TensorShape([]))
self.assertEqual(tf.int32, x.dtype)
x = ops.convert_to_tensor(tensor_shape.TensorShape([]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([], x.eval())

x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]))
self.assertEqual(tf.int32, x.dtype)
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())

x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.int64)
self.assertEqual(tf.int64, x.dtype)
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())

x = tf.reshape(tf.zeros([6]), tf.TensorShape([2, 3]))
x = array_ops.reshape(
array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval())

with self.assertRaisesRegexp(ValueError, "partially known"):
tf.convert_to_tensor(tf.TensorShape(None))
ops.convert_to_tensor(tensor_shape.TensorShape(None))

with self.assertRaisesRegexp(ValueError, "partially known"):
tf.convert_to_tensor(tf.TensorShape([1, None, 64]))
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))

with self.assertRaises(TypeError):
tf.convert_to_tensor(tf.TensorShape([1, 2, 3]), dtype=tf.float32)
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.float32)

def testAsTensorForDimensionInput(self):
with self.test_session():
x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1])
self.assertEqual(tf.int32, x.dtype)
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1])
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual(2, x.eval())

x = tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.int64)
self.assertEqual(tf.int64, x.dtype)
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual(2, x.eval())

with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
tf.convert_to_tensor(tf.TensorShape(None)[1])
ops.convert_to_tensor(tensor_shape.TensorShape(None)[1])

with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
tf.convert_to_tensor(tf.TensorShape([1, None, 64])[1])
ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])

with self.assertRaises(TypeError):
tf.convert_to_tensor(tf.TensorShape([1, 2, 3])[1], dtype=tf.float32)
ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.float32)

class IdentityOpTest(tf.test.TestCase):
class IdentityOpTest(test.TestCase):

def testIdTensor(self):
with tf.Graph().as_default():
x = tf.constant(2.0, shape=[6], name="input")
id_op = tf.identity(x, name="id")
self.assertTrue(isinstance(id_op.op.inputs[0], tf.Tensor))
self.assertProtoEquals(
"name: 'id' op: 'Identity' input: 'input' "
"attr { key: 'T' value { type: DT_FLOAT } }", id_op.op.node_def)
with ops.Graph().as_default():
x = constant_op.constant(2.0, shape=[6], name="input")
id_op = array_ops.identity(x, name="id")
self.assertTrue(isinstance(id_op.op.inputs[0], ops.Tensor))
self.assertProtoEquals("name: 'id' op: 'Identity' input: 'input' "
"attr { key: 'T' value { type: DT_FLOAT } }",
id_op.op.node_def)


class ZerosTest(tf.test.TestCase):
class ZerosTest(test.TestCase):

def _Zeros(self, shape):
with self.test_session():
ret = tf.zeros(shape)
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()

def testConst(self):
self.assertTrue(np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] *
2)))
self.assertTrue(
np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2)))

def testScalar(self):
self.assertEqual(0, self._Zeros([]))
self.assertEqual(0, self._Zeros(()))
with self.test_session():
scalar = tf.zeros(tf.constant([], dtype=tf.int32))
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.eval())

def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
d = tf.fill([2, 3], 12., name="fill")
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = tf.zeros(tf.shape(d))
z = array_ops.zeros(array_ops.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
@@ -294,40 +309,43 @@ class ZerosTest(tf.test.TestCase):

def testDtype(self):
with self.test_session():
d = tf.fill([2, 3], 12., name="fill")
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = tf.zeros([2, 3])
self.assertEqual(z.dtype, tf.float32)
z = array_ops.zeros([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
z = tf.zeros(tf.shape(d))
self.assertEqual(z.dtype, tf.float32)
z = array_ops.zeros(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
# Test explicit type control
for dtype in [tf.float32, tf.float64, tf.int32,
tf.uint8, tf.int16, tf.int8,
tf.complex64, tf.complex128, tf.int64, tf.bool]:
z = tf.zeros([2, 3], dtype=dtype)
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
dtypes_lib.bool
]:
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))
z = tf.zeros(tf.shape(d), dtype=dtype)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.zeros([2, 3]))

class ZerosLikeTest(tf.test.TestCase):
class ZerosLikeTest(test.TestCase):

def _compareZeros(self, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
# Creates a tensor of non-zero values with shape 2 x 3.
numpy_dtype = dtype.as_numpy_dtype
d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = tf.zeros_like(d)
z_var = array_ops.zeros_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
@@ -337,17 +355,23 @@ class ZerosLikeTest(tf.test.TestCase):
self.assertEqual([2, 3], z_var.get_shape())

def testZerosLikeCPU(self):
for dtype in [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
tf.complex64, tf.complex128, tf.int64]:
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
self._compareZeros(dtype, False)

def testZerosLikeGPU(self):
for dtype in [tf.float32, tf.float64, tf.int32, tf.bool, tf.int64]:
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.bool, dtypes_lib.int64
]:
self._compareZeros(dtype, True)

def testZerosLikePartialShape(self):
d = tf.placeholder(tf.float32, shape=[None, 4, None])
z = tf.zeros_like(d)
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.zeros_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())

def testZerosLikeDtype(self):
@@ -358,17 +382,17 @@ class ZerosLikeTest(tf.test.TestCase):
for in_type in dtypes:
x = np.arange(15).astype(in_type).reshape(*shape)
for out_type in dtypes:
y = tf.zeros_like(x, dtype=out_type).eval()
y = array_ops.zeros_like(x, dtype=out_type).eval()
self.assertEqual(y.dtype, out_type)
self.assertEqual(y.shape, shape)
self.assertAllEqual(y, np.zeros(shape, dtype=out_type))

class OnesTest(tf.test.TestCase):
class OnesTest(test.TestCase):

def _Ones(self, shape):
with self.test_session():
ret = tf.ones(shape)
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()

@@ -379,16 +403,16 @@ class OnesTest(tf.test.TestCase):
self.assertEqual(1, self._Ones([]))
self.assertEqual(1, self._Ones(()))
with self.test_session():
scalar = tf.ones(tf.constant([], dtype=tf.int32))
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.eval())

def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
with self.test_session():
# Creates a tensor of 2 x 3.
d = tf.fill([2, 3], 12., name="fill")
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = tf.ones(tf.shape(d))
z = array_ops.ones(array_ops.shape(d))
out = z.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
@@ -396,51 +420,56 @@ class OnesTest(tf.test.TestCase):

def testAutoPack(self):
with self.test_session():
h = tf.placeholder(tf.int32, shape=[])
w = tf.placeholder(tf.int32, shape=[])
z = tf.ones([h, w])
h = array_ops.placeholder(dtypes_lib.int32, shape=[])
w = array_ops.placeholder(dtypes_lib.int32, shape=[])
z = array_ops.ones([h, w])
out = z.eval(feed_dict={h: 4, w: 16})
self.assertAllEqual(out, np.array([[1] * 16] * 4))

def testDtype(self):
with self.test_session():
d = tf.fill([2, 3], 12., name="fill")
d = array_ops.fill([2, 3], 12., name="fill")
self.assertEqual(d.get_shape(), [2, 3])
# Test default type for both constant size and dynamic size
z = tf.ones([2, 3])
self.assertEqual(z.dtype, tf.float32)
z = array_ops.ones([2, 3])
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = tf.ones(tf.shape(d))
self.assertEqual(z.dtype, tf.float32)
z = array_ops.ones(array_ops.shape(d))
self.assertEqual(z.dtype, dtypes_lib.float32)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
# Test explicit type control
for dtype in (tf.float32, tf.float64, tf.int32,
tf.uint8, tf.int16, tf.int8,
tf.complex64, tf.complex128, tf.int64, tf.bool):
z = tf.ones([2, 3], dtype=dtype)
for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128,
dtypes_lib.int64, dtypes_lib.bool):
z = array_ops.ones([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))
z = tf.ones(tf.shape(d), dtype=dtype)
z = array_ops.ones(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
self.assertAllEqual(z.eval(), np.ones([2, 3]))

class OnesLikeTest(tf.test.TestCase):
class OnesLikeTest(test.TestCase):

def testOnesLike(self):
for dtype in [tf.float32, tf.float64, tf.int32,
tf.uint8, tf.int16, tf.int8,
tf.complex64, tf.complex128, tf.int64]:
for dtype in [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
]:
numpy_dtype = dtype.as_numpy_dtype
with self.test_session():
# Creates a tensor of non-zero values with shape 2 x 3.
d = tf.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
d = constant_op.constant(
np.ones(
(2, 3), dtype=numpy_dtype), dtype=dtype)
# Constructs a tensor of zeros of the same dimensions and type as "d".
z_var = tf.ones_like(d)
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
@@ -450,16 +479,16 @@ class OnesLikeTest(tf.test.TestCase):
self.assertEqual([2, 3], z_var.get_shape())

def testOnesLikePartialShape(self):
d = tf.placeholder(tf.float32, shape=[None, 4, None])
z = tf.ones_like(d)
d = array_ops.placeholder(dtypes_lib.float32, shape=[None, 4, None])
z = array_ops.ones_like(d)
self.assertEqual(d.get_shape().as_list(), z.get_shape().as_list())

class FillTest(tf.test.TestCase):
class FillTest(test.TestCase):

def _compare(self, dims, val, np_ans, use_gpu):
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.fill(dims, val, name="fill")
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
# Fill does not set the shape.
@@ -496,58 +525,59 @@ class FillTest(tf.test.TestCase):
def testFillString(self):
np_ans = np.array([[b"yolo"] * 3] * 2)
with self.test_session(use_gpu=False):
tf_ans = tf.fill([2, 3], np_ans[0][0], name="fill").eval()
tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").eval()
self.assertAllEqual(np_ans, tf_ans)

def testFillNegative(self):
with self.test_session():
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(ValueError):
tf.fill(shape, 7)
array_ops.fill(shape, 7)

# Using a placeholder so this won't be caught in static analysis.
dims = tf.placeholder(tf.int32)
fill_t = tf.fill(dims, 3.0)
dims = array_ops.placeholder(dtypes_lib.int32)
fill_t = array_ops.fill(dims, 3.0)
for shape in (-1,), (2, -1), (-1, 2), (-2), (-3):
with self.assertRaises(tf.errors.InvalidArgumentError):
with self.assertRaises(errors_impl.InvalidArgumentError):
fill_t.eval({dims: shape})

def testShapeFunctionEdgeCases(self):
# Non-vector dimensions.
with self.assertRaises(ValueError):
tf.fill([[0, 1], [2, 3]], 1.0)
array_ops.fill([[0, 1], [2, 3]], 1.0)

# Non-scalar value.
with self.assertRaises(ValueError):
tf.fill([3, 2], [1.0, 2.0])
array_ops.fill([3, 2], [1.0, 2.0])

# Partial dimension information.
f = tf.fill(
tf.placeholder(tf.int32, shape=(4,)), 3.0)
f = array_ops.fill(array_ops.placeholder(dtypes_lib.int32, shape=(4,)), 3.0)
self.assertEqual([None, None, None, None], f.get_shape().as_list())

f = tf.fill([tf.placeholder(tf.int32, shape=()), 17], 1.0)
f = array_ops.fill(
[array_ops.placeholder(
dtypes_lib.int32, shape=()), 17], 1.0)
self.assertEqual([None, 17], f.get_shape().as_list())

def testGradient(self):
with self.test_session():
in_v = tf.constant(5.0)
in_v = constant_op.constant(5.0)
out_shape = [3, 2]
out_filled = tf.fill(out_shape, in_v)
err = tf.test.compute_gradient_error(in_v, [],
out_filled, out_shape)
out_filled = array_ops.fill(out_shape, in_v)
err = gradient_checker.compute_gradient_error(in_v, [], out_filled,
out_shape)
self.assertLess(err, 1e-3)

class PlaceholderTest(tf.test.TestCase):
class PlaceholderTest(test.TestCase):

def testDtype(self):
with self.test_session():
p = tf.placeholder(tf.float32, name="p")
p_identity = tf.identity(p)
p = array_ops.placeholder(dtypes_lib.float32, name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
feed_array)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)

with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
@@ -555,11 +585,11 @@ class PlaceholderTest(tf.test.TestCase):

def testShape(self):
with self.test_session():
p = tf.placeholder(tf.float32, shape=(10, 10), name="p")
p_identity = tf.identity(p)
p = array_ops.placeholder(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
feed_array)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)

with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
@@ -572,11 +602,11 @@ class PlaceholderTest(tf.test.TestCase):

def testPartialShape(self):
with self.test_session():
p = tf.placeholder(tf.float32, shape=[None, 3], name="p")
p_identity = tf.identity(p)
p = array_ops.placeholder(dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(p_identity.eval(feed_dict={p: feed_array}),
feed_array)
self.assertAllClose(
p_identity.eval(feed_dict={p: feed_array}), feed_array)

with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
@@ -584,42 +614,36 @@ class PlaceholderTest(tf.test.TestCase):

def testControlDependency(self):
with self.test_session():
p = tf.placeholder(tf.int32, shape=[], name="p")
with tf.control_dependencies([p]):
c = tf.constant(5, tf.int32)
d = tf.mul(p, c)
p = array_ops.placeholder(dtypes_lib.int32, shape=[], name="p")
with ops.control_dependencies([p]):
c = constant_op.constant(5, dtypes_lib.int32)
d = math_ops.mul(p, c)
self.assertEqual(10, d.eval(feed_dict={p: 2}))

def testBadShape(self):
with self.assertRaises(ValueError):
tf.placeholder(tf.float32, shape=(-1, 10))
array_ops.placeholder(dtypes_lib.float32, shape=(-1, 10))

def testTensorStr(self):
a = tf.placeholder(tf.float32, name="a")
a = array_ops.placeholder(dtypes_lib.float32, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))

b = tf.placeholder(tf.int32, shape=(32, 40), name="b")
self.assertEqual(
"<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>",
repr(b))
b = array_ops.placeholder(dtypes_lib.int32, shape=(32, 40), name="b")
self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))

c = tf.placeholder(tf.qint32, shape=(32, None, 2), name="c")
self.assertEqual(
"<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>",
repr(c))
c = array_ops.placeholder(dtypes_lib.qint32, shape=(32, None, 2), name="c")
self.assertEqual("<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))

class PlaceholderV2Test(tf.test.TestCase):
class PlaceholderV2Test(test.TestCase):

def testDtype(self):
with self.test_session():
p = array_ops.placeholder_v2(tf.float32, shape=None, name="p")
p_identity = tf.identity(p)
p = array_ops.placeholder_v2(dtypes_lib.float32, shape=None, name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={
p: feed_array
}), feed_array)
p_identity.eval(feed_dict={p: feed_array}), feed_array)

with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
@@ -627,13 +651,11 @@ class PlaceholderV2Test(tf.test.TestCase):

def testShape(self):
with self.test_session():
p = array_ops.placeholder_v2(tf.float32, shape=(10, 10), name="p")
p_identity = tf.identity(p)
p = array_ops.placeholder_v2(dtypes_lib.float32, shape=(10, 10), name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 10)
self.assertAllClose(
p_identity.eval(feed_dict={
p: feed_array
}), feed_array)
p_identity.eval(feed_dict={p: feed_array}), feed_array)

with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
@@ -646,35 +668,30 @@ class PlaceholderV2Test(tf.test.TestCase):

def testUnknownShape(self):
with self.test_session():
p = array_ops.placeholder_v2(tf.float32, shape=None, name="p")
p_identity = tf.identity(p)
p = array_ops.placeholder_v2(dtypes_lib.float32, shape=None, name="p")
p_identity = array_ops.identity(p)
# can feed anything
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={
p: feed_array
}), feed_array)
p_identity.eval(feed_dict={p: feed_array}), feed_array)
feed_array = np.random.rand(4, 2, 5)
self.assertAllClose(
p_identity.eval(feed_dict={
p: feed_array
}), feed_array)
p_identity.eval(feed_dict={p: feed_array}), feed_array)

def testScalarShape(self):
with self.test_session():
p = array_ops.placeholder_v2(tf.float32, shape=[], name="p")
p_identity = tf.identity(p)
p = array_ops.placeholder_v2(dtypes_lib.float32, shape=[], name="p")
p_identity = array_ops.identity(p)
self.assertAllClose(p_identity.eval(feed_dict={p: 5}), 5)

def testPartialShape(self):
with self.test_session():
p = array_ops.placeholder_v2(tf.float32, shape=[None, 3], name="p")
p_identity = tf.identity(p)
p = array_ops.placeholder_v2(
dtypes_lib.float32, shape=[None, 3], name="p")
p_identity = array_ops.identity(p)
feed_array = np.random.rand(10, 3)
self.assertAllClose(
p_identity.eval(feed_dict={
p: feed_array
}), feed_array)
p_identity.eval(feed_dict={p: feed_array}), feed_array)

with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
@@ -682,45 +699,46 @@ class PlaceholderV2Test(tf.test.TestCase):

def testControlDependency(self):
with self.test_session():
p = array_ops.placeholder_v2(tf.int32, shape=[], name="p")
with tf.control_dependencies([p]):
c = tf.constant(5, tf.int32)
d = tf.mul(p, c)
p = array_ops.placeholder_v2(dtypes_lib.int32, shape=[], name="p")
with ops.control_dependencies([p]):
c = constant_op.constant(5, dtypes_lib.int32)
d = math_ops.mul(p, c)
val = np.array(2).astype(np.int)
self.assertEqual(10, d.eval(feed_dict={p: val}))

def testBadShape(self):
with self.assertRaises(ValueError):
array_ops.placeholder_v2(tf.float32, shape=(-1, 10))
array_ops.placeholder_v2(dtypes_lib.float32, shape=(-1, 10))

def testTensorStr(self):
a = array_ops.placeholder_v2(tf.float32, shape=None, name="a")
a = array_ops.placeholder_v2(dtypes_lib.float32, shape=None, name="a")
self.assertEqual("<tf.Tensor 'a:0' shape=<unknown> dtype=float32>", repr(a))

b = array_ops.placeholder_v2(tf.int32, shape=(32, 40), name="b")
b = array_ops.placeholder_v2(dtypes_lib.int32, shape=(32, 40), name="b")
self.assertEqual("<tf.Tensor 'b:0' shape=(32, 40) dtype=int32>", repr(b))

c = array_ops.placeholder_v2(tf.qint32, shape=(32, None, 2), name="c")
c = array_ops.placeholder_v2(
dtypes_lib.qint32, shape=(32, None, 2), name="c")
self.assertEqual("<tf.Tensor 'c:0' shape=(32, ?, 2) dtype=qint32>", repr(c))


class PlaceholderWithDefaultTest(tf.test.TestCase):
class PlaceholderWithDefaultTest(test.TestCase):

def testFullShape(self):
with self.test_session():
p = tf.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
a = tf.identity(p)
p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
a = array_ops.identity(p)
self.assertAllEqual([[2, 2], [2, 2]], a.eval())
self.assertAllEqual([[3, 3], [3, 3]],
a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))

with self.assertRaises(ValueError):
a.eval(feed_dict={p: [[6, 6, 6], [6, 6, 6]]})

def testPartialShape(self):
with self.test_session():
p = tf.placeholder_with_default([1, 2, 3], shape=[None])
a = tf.identity(p)
p = array_ops.placeholder_with_default([1, 2, 3], shape=[None])
a = array_ops.identity(p)
self.assertAllEqual([1, 2, 3], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))

@@ -729,13 +747,13 @@ class PlaceholderWithDefaultTest(tf.test.TestCase):

def testNoShape(self):
with self.test_session():
p = tf.placeholder_with_default([17], shape=None)
a = tf.identity(p)
p = array_ops.placeholder_with_default([17], shape=None)
a = array_ops.identity(p)
self.assertAllEqual([17], a.eval())
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
self.assertAllEqual([[3, 3], [3, 3]],
a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))


if __name__ == "__main__":
tf.test.main()
test.main()
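As the AsTensorTest hunks above show, `ops.convert_to_tensor` accepts a fully defined `TensorShape` but rejects a partially known one with a ValueError. A minimal sketch of that behavior, assuming the same TF 1.x-era modules the diff imports:

from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape

# A fully defined shape converts to an int32 vector tensor.
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))

# A partially known shape cannot be converted.
try:
  ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))
except ValueError as e:
  print(e)  # message mentions the shape is only partially known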
File diff suppressed because it is too large
@@ -12,41 +12,43 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test


class Conv1DTest(tf.test.TestCase):
class Conv1DTest(test.TestCase):

def testBasic(self):
"""Test that argument passing to conv2d is handled properly."""

x = tf.constant([1, 2, 3, 4], dtype=tf.float32)
x = tf.expand_dims(x, 0) # Add batch dimension
x = tf.expand_dims(x, 2) # And depth dimension
filters = tf.constant([2, 1], dtype=tf.float32)
filters = tf.expand_dims(filters, 1) # in_channels
filters = tf.expand_dims(filters, 2) # out_channels
x = constant_op.constant([1, 2, 3, 4], dtype=dtypes.float32)
x = array_ops.expand_dims(x, 0) # Add batch dimension
x = array_ops.expand_dims(x, 2) # And depth dimension
filters = constant_op.constant([2, 1], dtype=dtypes.float32)
filters = array_ops.expand_dims(filters, 1) # in_channels
filters = array_ops.expand_dims(filters, 2) # out_channels
# Filters is 2x1x1
for stride in [1, 2]:
with self.test_session():
c = tf.nn.conv1d(x, filters, stride, padding="VALID")
reduced = tf.squeeze(c)
c = nn_ops.conv1d(x, filters, stride, padding="VALID")
reduced = array_ops.squeeze(c)
output = reduced.eval()
if stride == 1:
self.assertEqual(len(output), 3)
self.assertAllClose(output,
[2*1+1*2, 2*2+1*3, 2*3+1*4])
[2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * 3 + 1 * 4])
else:
self.assertEqual(len(output), 2)
self.assertAllClose(output,
[2*1+1*2, 2*3+1*4])
self.assertAllClose(output, [2 * 1 + 1 * 2, 2 * 3 + 1 * 4])


if __name__ == "__main__":
tf.test.main()
test.main()
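The expected values in Conv1DTest are just a sliding dot product. A NumPy sketch that reproduces them (illustrative only, not part of the diff):

import numpy as np

x = np.array([1., 2., 3., 4.])
f = np.array([2., 1.])  # the 2x1x1 filter, flattened

# VALID conv1d with stride 1: dot the filter with each length-2 window.
out = np.array([np.dot(f, x[i:i + 2]) for i in range(len(x) - 1)])
print(out)       # [ 4.  7. 10.] == [2*1+1*2, 2*2+1*3, 2*3+1*4]
print(out[::2])  # [ 4. 10.], the stride-2 case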
@@ -12,17 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for convolution related functionality in tensorflow.ops.nn."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test


class Conv2DBackpropFilterGradTest(tf.test.TestCase):
class Conv2DBackpropFilterGradTest(test.TestCase):

def testGradient(self):
with self.test_session():
@@ -30,28 +37,29 @@ class Conv2DBackpropFilterGradTest(tf.test.TestCase):
for stride in [1, 2]:
np.random.seed(1)
in_shape = [5, 8, 6, 4]
in_val = tf.constant(
2 * np.random.random_sample(in_shape) - 1,
dtype=tf.float32)
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 4, 6]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = tf.nn.conv2d(in_val, tf.zeros(filter_shape),
[1, stride, stride, 1], padding)
conv_out = nn_ops.conv2d(in_val,
array_ops.zeros(filter_shape),
[1, stride, stride, 1], padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = tf.constant(
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=tf.float32)
output = tf.nn.conv2d_backprop_filter(in_val, filter_shape,
out_backprop_val,
[1, stride, stride, 1], padding)
err = tf.test.compute_gradient_error([in_val, out_backprop_val],
[in_shape, out_backprop_shape],
output, filter_shape)
dtype=dtypes.float32)
output = nn_ops.conv2d_backprop_filter(in_val, filter_shape,
out_backprop_val,
[1, stride, stride, 1],
padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv2d_backprop_filter gradient err = %g " % err)
err_tolerance = 2e-3
self.assertLess(err, err_tolerance)


if __name__ == "__main__":
tf.test.main()
test.main()
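The `gradient_checker.compute_gradient_error` call these tests migrate to compares the analytic Jacobian against a finite-difference estimate and returns the largest absolute deviation. A minimal sketch on a trivial op, assuming the TF 1.x-era API used throughout this diff:

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class TinyGradientTest(test.TestCase):

  def testSquare(self):
    with self.test_session():
      x = constant_op.constant([1.0, 2.0, 3.0])
      y = math_ops.square(x)  # analytic dy/dx is the diagonal [2, 4, 6]
      err = gradient_checker.compute_gradient_error(x, [3], y, [3])
      self.assertLess(err, 1e-4)


if __name__ == "__main__":
  test.main()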
@@ -12,19 +12,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for convolution related functionality in tensorflow.ops.nn."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf

from tensorflow.python.client import device_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test


class Conv2DTransposeTest(tf.test.TestCase):
class Conv2DTransposeTest(test.TestCase):

def testConv2DTransposeSingleStride(self):
with self.test_session():
@@ -37,10 +43,12 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]

x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="SAME")
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()

# We count the number of cells being added at the locations in the output.
@@ -73,10 +81,12 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]

x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="SAME")
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()

for n in xrange(x_shape[0]):
@@ -104,10 +114,12 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]

x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="VALID")
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = output.eval()

cache_values = np.zeros(y_shape, dtype=np.float32)
@@ -121,10 +133,10 @@ class Conv2DTransposeTest(tf.test.TestCase):
for h in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[
1] == 0 and h > pad and h < y_shape[1] - 1 - pad
w_in = w % strides[
2] == 0 and w > pad and w < y_shape[2] - 1 - pad
h_in = h % strides[1] == 0 and h > pad and h < y_shape[
1] - 1 - pad
w_in = w % strides[2] == 0 and w > pad and w < y_shape[
2] - 1 - pad
if h_in and w_in:
target += 9.0
elif h_in or w_in:
@@ -148,19 +160,19 @@ class Conv2DTransposeTest(tf.test.TestCase):
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.test_session():
x = tf.constant(x_val, name="x", dtype=tf.float32)
f = tf.constant(f_val, name="f", dtype=tf.float32)
output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="SAME")
err = tf.test.compute_gradient_error(
[x, f], [x_shape, f_shape], output, y_shape)
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv2d_transpose gradient err = %g " % err)
err_tolerance = 0.0005
self.assertLess(err, err_tolerance)

def testConv2DTransposeSingleStrideNCHW(self):
# `NCHW` data fomat is only supported for CUDA device.
if tf.test.is_gpu_available(cuda_only=True):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 1, 1]

@@ -171,11 +183,13 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]

x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="SAME", data_format='NCHW')
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")

value = output.eval()
for n in xrange(x_shape[0]):
@@ -193,7 +207,7 @@ class Conv2DTransposeTest(tf.test.TestCase):

def testConv2DTransposeSameNCHW(self):
# `NCHW` data fomat is only supported for CUDA device.
if tf.test.is_gpu_available(cuda_only=True):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]

@@ -204,11 +218,13 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]

x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)

output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="SAME", data_format='NCHW')
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")

value = output.eval()
for n in xrange(x_shape[0]):
@@ -227,7 +243,7 @@ class Conv2DTransposeTest(tf.test.TestCase):

def testConv2DTransposeValidNCHW(self):
# `NCHW` data fomat is only supported for CUDA device.
if tf.test.is_gpu_available(cuda_only=True):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
strides = [1, 1, 2, 2]

@@ -238,10 +254,12 @@ class Conv2DTransposeTest(tf.test.TestCase):
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]

x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv2d_transpose(x, f, y_shape, strides=strides,
padding="VALID", data_format='NCHW')
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID", data_format="NCHW")

value = output.eval()
cache_values = np.zeros(y_shape, dtype=np.float32)
@@ -253,10 +271,10 @@ class Conv2DTransposeTest(tf.test.TestCase):
for h in xrange(pad, y_shape[2] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[
2] == 0 and h > pad and h < y_shape[2] - 1 - pad
w_in = w % strides[
3] == 0 and w > pad and w < y_shape[3] - 1 - pad
h_in = h % strides[2] == 0 and h > pad and h < y_shape[
2] - 1 - pad
w_in = w % strides[3] == 0 and w > pad and w < y_shape[
3] - 1 - pad
if h_in and w_in:
target += 9.0
elif h_in or w_in:
@@ -273,4 +291,4 @@ class Conv2DTransposeTest(tf.test.TestCase):


if __name__ == "__main__":
tf.test.main()
test.main()
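The `target` bookkeeping in these tests falls out of viewing conv2d_transpose as a scatter-add: every input cell adds a filter-shaped patch into the output. A NumPy sketch for an all-ones 3x3 filter at stride 1 (illustrative shapes, not the test's):

import numpy as np

x = np.ones((4, 4))
k = np.ones((3, 3))
out = np.zeros((6, 6))  # VALID output extent: 4 + 3 - 1

for i in range(4):
  for j in range(4):
    # Scatter the filter, scaled by the input cell, into the output.
    out[i:i + 3, j:j + 3] += k * x[i, j]

print(out[2, 2])  # 9.0: an interior cell overlaps nine input cells
print(out[0, 0])  # 1.0: a corner cell overlaps only one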
@@ -12,17 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for convolution related functionality in tensorflow.ops.nn."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test


class Conv3DBackpropFilterV2GradTest(tf.test.TestCase):
class Conv3DBackpropFilterV2GradTest(test.TestCase):

def testGradient(self):
with self.test_session():
@@ -30,29 +37,29 @@ class Conv3DBackpropFilterV2GradTest(tf.test.TestCase):
for stride in [1, 2]:
np.random.seed(1)
in_shape = [2, 4, 3, 3, 2]
in_val = tf.constant(
2 * np.random.random_sample(in_shape) - 1,
dtype=tf.float32)
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 3, 2, 3]
strides = [1, stride, stride, stride, 1]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = tf.nn.conv3d(in_val, tf.zeros(filter_shape), strides,
padding)
conv_out = nn_ops.conv3d(in_val,
array_ops.zeros(filter_shape), strides,
padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = tf.constant(
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=tf.float32)
output = tf.nn.conv3d_backprop_filter_v2(in_val, filter_shape,
out_backprop_val,
strides, padding)
err = tf.test.compute_gradient_error([in_val, out_backprop_val],
[in_shape, out_backprop_shape],
output, filter_shape)
dtype=dtypes.float32)
output = nn_ops.conv3d_backprop_filter_v2(in_val, filter_shape,
out_backprop_val, strides,
padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv3d_backprop_filter gradient err = %g " % err)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)


if __name__ == "__main__":
tf.test.main()
test.main()
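The y_shape constants in these transpose tests satisfy the usual deconvolution size relations along each spatial axis. A small sketch of the two padding cases (formulas only; the function name is illustrative):

def deconv_out_size(in_size, kernel, stride, padding):
  # Output extent of conv*_transpose along one spatial axis.
  if padding == "VALID":
    return (in_size - 1) * stride + kernel
  return in_size * stride  # "SAME"

print(deconv_out_size(5, 3, 2, "VALID"))  # 11
print(deconv_out_size(5, 3, 2, "SAME"))   # 10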
@@ -12,18 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for convolution related functionality in tensorflow.ops.nn."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test


class Conv3DTransposeTest(tf.test.TestCase):
class Conv3DTransposeTest(test.TestCase):

def testConv3DTransposeSingleStride(self):
with self.test_session():
@@ -36,10 +42,12 @@ class Conv3DTransposeTest(tf.test.TestCase):
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]

x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
padding="SAME")
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()

# We count the number of cells being added at the locations in the output.
@@ -84,10 +92,12 @@ class Conv3DTransposeTest(tf.test.TestCase):
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]

x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
padding="SAME")
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()

for n in xrange(x_shape[0]):
@@ -120,10 +130,12 @@ class Conv3DTransposeTest(tf.test.TestCase):
# Filter: [kernel_depth, kernel_height, kernel_width, out_depth, in_depth]
f_shape = [3, 3, 3, 2, 3]

x = tf.constant(1.0, shape=x_shape, name="x", dtype=tf.float32)
f = tf.constant(1.0, shape=f_shape, name="filter", dtype=tf.float32)
output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
padding="VALID")
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = output.eval()

cache_values = np.zeros(y_shape, dtype=np.float32)
@@ -169,16 +181,16 @@ class Conv3DTransposeTest(tf.test.TestCase):
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.test_session():
x = tf.constant(x_val, name="x", dtype=tf.float32)
f = tf.constant(f_val, name="f", dtype=tf.float32)
output = tf.nn.conv3d_transpose(x, f, y_shape, strides=strides,
padding="SAME")
err = tf.test.compute_gradient_error(
[x, f], [x_shape, f_shape], output, y_shape)
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv3d_transpose gradient err = %g " % err)
|
||||
err_tolerance = 0.0005
|
||||
self.assertLess(err, err_tolerance)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
tf.test.main()
|
||||
test.main()
@ -13,19 +13,26 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import math
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test


class Conv3DTest(tf.test.TestCase):
class Conv3DTest(test.TestCase):

  def _VerifyValues(
      self, tensor_in_sizes, filter_in_sizes, stride, padding, expected):
  def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                    expected):
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
@ -43,55 +50,119 @@ class Conv3DTest(tf.test.TestCase):
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with self.test_session(use_gpu=True) as sess:
      t1 = tf.constant(x1, shape=tensor_in_sizes)
      t2 = tf.constant(x2, shape=filter_in_sizes)
      conv = tf.nn.conv3d(t1, t2, strides, padding=padding)
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      conv = nn_ops.conv3d(t1, t2, strides, padding=padding)
      value = sess.run(conv)
    print("expected = ", expected)
    print("actual = ", value)
    self.assertArrayNear(expected, value.flatten(), 1e-5)

  def testConv3D1x1x1Filter(self):
    expected_output = [30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
                       138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0,
                       312.0]
    expected_output = [
        30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
        204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
    ]

    # These are equivalent to the Conv2D1x1 case.
    self._VerifyValues(tensor_in_sizes=[1, 2, 3, 1, 3],
                       filter_in_sizes=[1, 1, 1, 3, 3],
                       stride=1,
                       padding="VALID",
                       expected=expected_output)
    self._VerifyValues(tensor_in_sizes=[1, 2, 1, 3, 3],
                       filter_in_sizes=[1, 1, 1, 3, 3],
                       stride=1,
                       padding="VALID",
                       expected=expected_output)
    self._VerifyValues(tensor_in_sizes=[1, 1, 2, 3, 3],
                       filter_in_sizes=[1, 1, 1, 3, 3],
                       stride=1,
                       padding="VALID",
                       expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 1, 3],
        filter_in_sizes=[1, 1, 1, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 1, 3, 3],
        filter_in_sizes=[1, 1, 1, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 1, 2, 3, 3],
        filter_in_sizes=[1, 1, 1, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)

  # Expected values computed using scipy's correlate function.
  def testConv3D2x2x2Filter(self):
    expected_output = [19554., 19962., 20370., 22110., 22590., 23070., 34890.,
                       35730., 36570., 37446., 38358., 39270., 50226., 51498.,
                       52770., 52782., 54126., 55470.]
    expected_output = [
        19554., 19962., 20370., 22110., 22590., 23070., 34890., 35730., 36570.,
        37446., 38358., 39270., 50226., 51498., 52770., 52782., 54126., 55470.
    ]
    # expected_shape = [1, 3, 1, 2, 5]
    self._VerifyValues(tensor_in_sizes=[1, 4, 2, 3, 3],  # b, z, y, x, fin
                       filter_in_sizes=[2, 2, 2, 3, 3],  # z, y, x, fin, fout
                       stride=1, padding="VALID",
                       expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 2, 3, 3],  # b, z, y, x, fin
        filter_in_sizes=[2, 2, 2, 3, 3],  # z, y, x, fin, fout
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv3DStrides(self):
    expected_output = [
        102., 151., 172., 193., 214., 235., 142., 438., 592., 613., 634., 655.,
        676., 394., 774., 1033., 1054., 1075., 1096., 1117., 646., 1894., 2503.,
        2524., 2545., 2566., 2587., 1486., 2230., 2944., 2965., 2986., 3007.,
        3028., 1738., 2566., 3385., 3406., 3427., 3448., 3469., 1990., 3686.,
        4855., 4876., 4897., 4918., 4939., 2830., 4022., 5296., 5317., 5338.,
        5359., 5380., 3082., 4358., 5737., 5758., 5779., 5800., 5821., 3334.,
        102.,
        151.,
        172.,
        193.,
        214.,
        235.,
        142.,
        438.,
        592.,
        613.,
        634.,
        655.,
        676.,
        394.,
        774.,
        1033.,
        1054.,
        1075.,
        1096.,
        1117.,
        646.,
        1894.,
        2503.,
        2524.,
        2545.,
        2566.,
        2587.,
        1486.,
        2230.,
        2944.,
        2965.,
        2986.,
        3007.,
        3028.,
        1738.,
        2566.,
        3385.,
        3406.,
        3427.,
        3448.,
        3469.,
        1990.,
        3686.,
        4855.,
        4876.,
        4897.,
        4918.,
        4939.,
        2830.,
        4022.,
        5296.,
        5317.,
        5338.,
        5359.,
        5380.,
        3082.,
        4358.,
        5737.,
        5758.,
        5779.,
        5800.,
        5821.,
        3334.,
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 5, 8, 7, 1],
@ -102,11 +173,12 @@ class Conv3DTest(tf.test.TestCase):

  def testConv3D2x2x2FilterStride2(self):
    expected_output = [19554., 19962., 20370., 50226., 51498., 52770.]
    self._VerifyValues(tensor_in_sizes=[1, 4, 2, 3, 3],
                       filter_in_sizes=[2, 2, 2, 3, 3],
                       stride=2,
                       padding="VALID",
                       expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 2, 3, 3],
        filter_in_sizes=[2, 2, 2, 3, 3],
        stride=2,
        padding="VALID",
        expected=expected_output)

  def testConv3DStride3(self):
    expected_output = [
@ -116,71 +188,68 @@ class Conv3DTest(tf.test.TestCase):
        120912., 117204., 123270., 129336., 118464., 124602., 130740., 119724.,
        125934., 132144.
    ]
    self._VerifyValues(tensor_in_sizes=[1, 6, 7, 8, 2],
                       filter_in_sizes=[3, 2, 1, 2, 3],
                       stride=3,
                       padding="VALID",
                       expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 6, 7, 8, 2],
        filter_in_sizes=[3, 2, 1, 2, 3],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def testConv3D2x2x2FilterStride2Same(self):
    expected_output = [
        19554., 19962., 20370., 10452., 10710., 10968., 50226., 51498., 52770.,
        23844., 24534., 25224.
    ]
    self._VerifyValues(tensor_in_sizes=[1, 4, 2, 3, 3],
                       filter_in_sizes=[2, 2, 2, 3, 3],
                       stride=2,
                       padding="SAME",
                       expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 2, 3, 3],
        filter_in_sizes=[2, 2, 2, 3, 3],
        stride=2,
        padding="SAME",
        expected=expected_output)

  def testKernelSmallerThanStride(self):
    expected_output = [1., 3., 7., 9., 19., 21., 25., 27.]
    self._VerifyValues(tensor_in_sizes=[1, 3, 3, 3, 1],
                       filter_in_sizes=[1, 1, 1, 1, 1],
                       stride=2,
                       padding="SAME",
                       expected=expected_output)
    self._VerifyValues(tensor_in_sizes=[1, 3, 3, 3, 1],
                       filter_in_sizes=[1, 1, 1, 1, 1],
                       stride=2,
                       padding="VALID",
                       expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 3, 3, 3, 1],
        filter_in_sizes=[1, 1, 1, 1, 1],
        stride=2,
        padding="SAME",
        expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 3, 3, 3, 1],
        filter_in_sizes=[1, 1, 1, 1, 1],
        stride=2,
        padding="VALID",
        expected=expected_output)

    expected_output = [1484., 1592., 770.,
                       2240., 2348., 1106.,
                       1149., 1191., 539.,
    expected_output = [
        1484., 1592., 770., 2240., 2348., 1106., 1149., 1191., 539., 6776.,
        6884., 3122., 7532., 7640., 3458., 3207., 3249., 1421., 3005., 3035.,
        1225., 3215., 3245., 1309., 1013., 1022., 343.
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 7, 7, 7, 1],
        filter_in_sizes=[2, 2, 2, 1, 1],
        stride=3,
        padding="SAME",
        expected=expected_output)

                       6776., 6884., 3122.,
                       7532., 7640., 3458.,
                       3207., 3249., 1421.,

                       3005., 3035., 1225.,
                       3215., 3245., 1309.,
                       1013., 1022., 343.]
    self._VerifyValues(tensor_in_sizes=[1, 7, 7, 7, 1],
                       filter_in_sizes=[2, 2, 2, 1, 1],
                       stride=3,
                       padding="SAME",
                       expected=expected_output)

    expected_output = [1484., 1592.,
                       2240., 2348.,

                       6776., 6884.,
                       7532., 7640.]
    self._VerifyValues(tensor_in_sizes=[1, 7, 7, 7, 1],
                       filter_in_sizes=[2, 2, 2, 1, 1],
                       stride=3,
                       padding="VALID",
                       expected=expected_output)
    expected_output = [1484., 1592., 2240., 2348., 6776., 6884., 7532., 7640.]
    self._VerifyValues(
        tensor_in_sizes=[1, 7, 7, 7, 1],
        filter_in_sizes=[2, 2, 2, 1, 1],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def ConstructAndTestGradient(self, batch, input_planes, input_rows,
                               input_cols, filter_planes, filter_rows,
                               filter_cols, in_depth, out_depth, stride,
                               padding, test_input):
    input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
    filter_shape = [filter_planes, filter_rows, filter_cols, in_depth,
                    out_depth]
    filter_shape = [
        filter_planes, filter_rows, filter_cols, in_depth, out_depth
    ]

    if isinstance(stride, collections.Iterable):
      strides = [1] + list(stride) + [1]
@ -188,12 +257,12 @@ class Conv3DTest(tf.test.TestCase):
      strides = [1, stride, stride, stride, 1]

    if padding == "VALID":
      output_planes = int(math.ceil((input_planes - filter_planes + 1.0) /
                                    strides[1]))
      output_rows = int(math.ceil((input_rows - filter_rows + 1.0) /
                                  strides[2]))
      output_cols = int(math.ceil((input_cols - filter_cols + 1.0) /
                                  strides[3]))
      output_planes = int(
          math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
      output_rows = int(
          math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
      output_cols = int(
          math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
    else:
      output_planes = int(math.ceil(float(input_planes) / strides[1]))
      output_rows = int(math.ceil(float(input_rows) / strides[2]))
@ -207,9 +276,9 @@ class Conv3DTest(tf.test.TestCase):
      filter_size *= x
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
    if tf.test.is_gpu_available():
      data_type = tf.float32
      if tf.test.is_gpu_available():
    if test.is_gpu_available():
      data_type = dtypes.float32
      if test.is_gpu_available():
        tolerance = 4e-3
      else:
        # As of Aug 2016, higher tolerance is needed for some CPU architectures.
@ -217,228 +286,236 @@ class Conv3DTest(tf.test.TestCase):
        # because of multithreading.
        tolerance = 8e-3
    else:
      data_type = tf.float64
      data_type = dtypes.float64
      tolerance = 1e-8
    with self.test_session(use_gpu=True):
      input_tensor = tf.constant(input_data,
                                 shape=input_shape,
                                 dtype=data_type,
                                 name="input")
      filter_tensor = tf.constant(filter_data,
                                  shape=filter_shape,
                                  dtype=data_type,
                                  name="filter")
      conv = tf.nn.conv3d(input_tensor,
                          filter_tensor,
                          strides,
                          padding,
                          name="conv")
      input_tensor = constant_op.constant(
          input_data, shape=input_shape, dtype=data_type, name="input")
      filter_tensor = constant_op.constant(
          filter_data, shape=filter_shape, dtype=data_type, name="filter")
      conv = nn_ops.conv3d(
          input_tensor, filter_tensor, strides, padding, name="conv")

      if test_input:
        err = tf.test.compute_gradient_error(input_tensor, input_shape, conv,
                                             output_shape)
        err = gradient_checker.compute_gradient_error(input_tensor, input_shape,
                                                      conv, output_shape)
      else:
        err = tf.test.compute_gradient_error(filter_tensor, filter_shape, conv,
                                             output_shape)
        err = gradient_checker.compute_gradient_error(filter_tensor,
                                                      filter_shape, conv,
                                                      output_shape)
    print("conv3d gradient error = ", err)
    self.assertLess(err, tolerance)

  def testInputGradientValidPaddingStrideOne(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=3,
                                  input_rows=5,
                                  input_cols=4,
                                  filter_planes=3,
                                  filter_rows=3,
                                  filter_cols=3,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=1,
                                  padding="VALID",
                                  test_input=True)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=3,
        input_rows=5,
        input_cols=4,
        filter_planes=3,
        filter_rows=3,
        filter_cols=3,
        in_depth=2,
        out_depth=3,
        stride=1,
        padding="VALID",
        test_input=True)

  def testFilterGradientValidPaddingStrideOne(self):
    self.ConstructAndTestGradient(batch=4,
                                  input_planes=4,
                                  input_rows=6,
                                  input_cols=5,
                                  filter_planes=2,
                                  filter_rows=2,
                                  filter_cols=2,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=1,
                                  padding="VALID",
                                  test_input=False)
    self.ConstructAndTestGradient(
        batch=4,
        input_planes=4,
        input_rows=6,
        input_cols=5,
        filter_planes=2,
        filter_rows=2,
        filter_cols=2,
        in_depth=2,
        out_depth=3,
        stride=1,
        padding="VALID",
        test_input=False)

  def testInputGradientValidPaddingStrideTwo(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=6,
                                  input_rows=3,
                                  input_cols=5,
                                  filter_planes=3,
                                  filter_rows=3,
                                  filter_cols=3,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=2,
                                  padding="VALID",
                                  test_input=True)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=6,
        input_rows=3,
        input_cols=5,
        filter_planes=3,
        filter_rows=3,
        filter_cols=3,
        in_depth=2,
        out_depth=3,
        stride=2,
        padding="VALID",
        test_input=True)

  def testFilterGradientValidPaddingStrideTwo(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=7,
                                  input_rows=6,
                                  input_cols=5,
                                  filter_planes=2,
                                  filter_rows=2,
                                  filter_cols=2,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=2,
                                  padding="VALID",
                                  test_input=False)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=7,
        input_rows=6,
        input_cols=5,
        filter_planes=2,
        filter_rows=2,
        filter_cols=2,
        in_depth=2,
        out_depth=3,
        stride=2,
        padding="VALID",
        test_input=False)

  def testInputGradientValidPaddingStrideThree(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=3,
                                  input_rows=7,
                                  input_cols=6,
                                  filter_planes=3,
                                  filter_rows=3,
                                  filter_cols=3,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=3,
                                  padding="VALID",
                                  test_input=True)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=3,
        input_rows=7,
        input_cols=6,
        filter_planes=3,
        filter_rows=3,
        filter_cols=3,
        in_depth=2,
        out_depth=3,
        stride=3,
        padding="VALID",
        test_input=True)

  def testFilterGradientValidPaddingStrideThree(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=4,
                                  input_rows=4,
                                  input_cols=7,
                                  filter_planes=4,
                                  filter_rows=4,
                                  filter_cols=4,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=3,
                                  padding="VALID",
                                  test_input=False)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=4,
        input_rows=4,
        input_cols=7,
        filter_planes=4,
        filter_rows=4,
        filter_cols=4,
        in_depth=2,
        out_depth=3,
        stride=3,
        padding="VALID",
        test_input=False)

  def testInputGradientSamePaddingStrideOne(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=3,
                                  input_rows=2,
                                  input_cols=2,
                                  filter_planes=3,
                                  filter_rows=2,
                                  filter_cols=1,
                                  in_depth=2,
                                  out_depth=1,
                                  stride=1,
                                  padding="SAME",
                                  test_input=True)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=3,
        input_rows=2,
        input_cols=2,
        filter_planes=3,
        filter_rows=2,
        filter_cols=1,
        in_depth=2,
        out_depth=1,
        stride=1,
        padding="SAME",
        test_input=True)

  def testFilterGradientSamePaddingStrideOne(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=3,
                                  input_rows=6,
                                  input_cols=5,
                                  filter_planes=2,
                                  filter_rows=2,
                                  filter_cols=2,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=1,
                                  padding="SAME",
                                  test_input=False)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=3,
        input_rows=6,
        input_cols=5,
        filter_planes=2,
        filter_rows=2,
        filter_cols=2,
        in_depth=2,
        out_depth=3,
        stride=1,
        padding="SAME",
        test_input=False)

  def testInputGradientSamePaddingStrideTwo(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=6,
                                  input_rows=3,
                                  input_cols=4,
                                  filter_planes=3,
                                  filter_rows=3,
                                  filter_cols=3,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=2,
                                  padding="SAME",
                                  test_input=True)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=6,
        input_rows=3,
        input_cols=4,
        filter_planes=3,
        filter_rows=3,
        filter_cols=3,
        in_depth=2,
        out_depth=3,
        stride=2,
        padding="SAME",
        test_input=True)

  def testFilterGradientSamePaddingStrideTwo(self):
    self.ConstructAndTestGradient(batch=4,
                                  input_planes=7,
                                  input_rows=3,
                                  input_cols=5,
                                  filter_planes=2,
                                  filter_rows=2,
                                  filter_cols=2,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=2,
                                  padding="SAME",
                                  test_input=False)
    self.ConstructAndTestGradient(
        batch=4,
        input_planes=7,
        input_rows=3,
        input_cols=5,
        filter_planes=2,
        filter_rows=2,
        filter_cols=2,
        in_depth=2,
        out_depth=3,
        stride=2,
        padding="SAME",
        test_input=False)

  def testInputGradientSamePaddingStrideThree(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=9,
                                  input_rows=3,
                                  input_cols=6,
                                  filter_planes=3,
                                  filter_rows=3,
                                  filter_cols=3,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=3,
                                  padding="SAME",
                                  test_input=True)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=9,
        input_rows=3,
        input_cols=6,
        filter_planes=3,
        filter_rows=3,
        filter_cols=3,
        in_depth=2,
        out_depth=3,
        stride=3,
        padding="SAME",
        test_input=True)

  def testFilterGradientSamePaddingStrideThree(self):
    self.ConstructAndTestGradient(batch=2,
                                  input_planes=9,
                                  input_rows=4,
                                  input_cols=7,
                                  filter_planes=4,
                                  filter_rows=4,
                                  filter_cols=4,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=3,
                                  padding="SAME",
                                  test_input=False)
    self.ConstructAndTestGradient(
        batch=2,
        input_planes=9,
        input_rows=4,
        input_cols=7,
        filter_planes=4,
        filter_rows=4,
        filter_cols=4,
        in_depth=2,
        out_depth=3,
        stride=3,
        padding="SAME",
        test_input=False)

  def testInputGradientSamePaddingDifferentStrides(self):
    self.ConstructAndTestGradient(batch=1,
                                  input_planes=5,
                                  input_rows=8,
                                  input_cols=7,
                                  filter_planes=1,
                                  filter_rows=2,
                                  filter_cols=3,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=[2, 3, 1],
                                  padding="SAME",
                                  test_input=True)
    self.ConstructAndTestGradient(
        batch=1,
        input_planes=5,
        input_rows=8,
        input_cols=7,
        filter_planes=1,
        filter_rows=2,
        filter_cols=3,
        in_depth=2,
        out_depth=3,
        stride=[2, 3, 1],
        padding="SAME",
        test_input=True)

  def disabledtestFilterGradientSamePaddingDifferentStrides(self):
    self.ConstructAndTestGradient(batch=1,
                                  input_planes=5,
                                  input_rows=8,
                                  input_cols=7,
                                  filter_planes=1,
                                  filter_rows=2,
                                  filter_cols=3,
                                  in_depth=2,
                                  out_depth=3,
                                  stride=[2, 3, 1],
                                  padding="SAME",
                                  test_input=False)
    self.ConstructAndTestGradient(
        batch=1,
        input_planes=5,
        input_rows=8,
        input_cols=7,
        filter_planes=1,
        filter_rows=2,
        filter_cols=3,
        in_depth=2,
        out_depth=3,
        stride=[2, 3, 1],
        padding="SAME",
        test_input=False)


if __name__ == "__main__":
  tf.test.main()
  test.main()
File diff suppressed because it is too large
@ -18,22 +18,28 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class CrossOpTest(tf.test.TestCase):
class CrossOpTest(test.TestCase):

  def testGradientRandomValues(self):
    with self.test_session():
      us = [2, 3]
      u = tf.reshape([0.854, -0.616, 0.767, 0.725, -0.927, 0.159], shape=us)
      v = tf.reshape([-0.522, 0.755, 0.407, -0.652, 0.241, 0.247], shape=us)
      s = tf.cross(u, v)
      jacob_u, jacob_v = tf.test.compute_gradient([u, v], [us, us], s, us)
      u = array_ops.reshape(
          [0.854, -0.616, 0.767, 0.725, -0.927, 0.159], shape=us)
      v = array_ops.reshape(
          [-0.522, 0.755, 0.407, -0.652, 0.241, 0.247], shape=us)
      s = math_ops.cross(u, v)
      jacob_u, jacob_v = gradient_checker.compute_gradient([u, v], [us, us], s,
                                                           us)

      self.assertAllClose(jacob_u[0], jacob_u[1], rtol=1e-3, atol=1e-3)
      self.assertAllClose(jacob_v[0], jacob_v[1], rtol=1e-3, atol=1e-3)


if __name__ == "__main__":
  tf.test.main()
  test.main()
@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.ctc_ops.ctc_loss_op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
@ -23,7 +23,10 @@ import itertools
import numpy as np
from six.moves import zip_longest

import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import ctc_ops
from tensorflow.python.platform import test


def grouper(iterable, n, fillvalue=None):
@ -38,25 +41,30 @@ def flatten(list_of_lists):
  return itertools.chain.from_iterable(list_of_lists)


class CTCGreedyDecoderTest(tf.test.TestCase):
class CTCGreedyDecoderTest(test.TestCase):

  def _testCTCDecoder(self, decoder, inputs, seq_lens, log_prob_truth,
                      decode_truth, expected_err_re=None, **decoder_args):
    inputs_t = [tf.convert_to_tensor(x) for x in inputs]
  def _testCTCDecoder(self,
                      decoder,
                      inputs,
                      seq_lens,
                      log_prob_truth,
                      decode_truth,
                      expected_err_re=None,
                      **decoder_args):
    inputs_t = [ops.convert_to_tensor(x) for x in inputs]
    # convert inputs_t into a [max_time x batch_size x depth] tensor
    # from a len time python list of [batch_size x depth] tensors
    inputs_t = tf.stack(inputs_t)
    inputs_t = array_ops.stack(inputs_t)

    with self.test_session(use_gpu=False) as sess:
      decoded_list, log_probability = decoder(
          inputs_t,
          sequence_length=seq_lens, **decoder_args)
      decoded_unwrapped = list(flatten([
          (st.indices, st.values, st.dense_shape) for st in decoded_list]))
          inputs_t, sequence_length=seq_lens, **decoder_args)
      decoded_unwrapped = list(
          flatten([(st.indices, st.values, st.dense_shape) for st in
                   decoded_list]))

      if expected_err_re is None:
        outputs = sess.run(
            decoded_unwrapped + [log_probability])
        outputs = sess.run(decoded_unwrapped + [log_probability])

        # Group outputs into (ix, vals, shape) tuples
        output_sparse_tensors = list(grouper(outputs[:-1], 3))
@ -67,8 +75,8 @@ class CTCGreedyDecoderTest(tf.test.TestCase):
        self.assertEqual(len(output_sparse_tensors), len(decode_truth))

        # For each SparseTensor tuple, compare (ix, vals, shape)
        for out_st, truth_st, tf_st in zip(
            output_sparse_tensors, decode_truth, decoded_list):
        for out_st, truth_st, tf_st in zip(output_sparse_tensors, decode_truth,
                                           decoded_list):
          self.assertAllEqual(out_st[0], truth_st[0])  # ix
          self.assertAllEqual(out_st[1], truth_st[1])  # vals
          self.assertAllEqual(out_st[2], truth_st[2])  # shape
@ -105,19 +113,23 @@ class CTCGreedyDecoderTest(tf.test.TestCase):
    # dimensions are time x depth

    input_prob_matrix_1 = np.asarray(
        [[0.1, 0.9, 0.0, 0.0],  # t=0
         [0.0, 0.9, 0.1, 0.0],  # t=1
         [0.0, 0.0, 0.1, 0.9],  # t=2
         [0.0, 0.9, 0.1, 0.1],  # t=3
         [0.9, 0.1, 0.0, 0.0],  # t=4
         [0.0, 0.0, 0.0, 0.0]],  # t=5 (ignored)
        [
            [0.1, 0.9, 0.0, 0.0],  # t=0
            [0.0, 0.9, 0.1, 0.0],  # t=1
            [0.0, 0.0, 0.1, 0.9],  # t=2
            [0.0, 0.9, 0.1, 0.1],  # t=3
            [0.9, 0.1, 0.0, 0.0],  # t=4
            [0.0, 0.0, 0.0, 0.0]
        ],  # t=5 (ignored)
        dtype=np.float32)
    input_log_prob_matrix_1 = np.log(input_prob_matrix_1)

    # len max_time_steps array of batch_size x depth matrices
    inputs = [np.vstack([input_log_prob_matrix_0[t, :],
                         input_log_prob_matrix_1[t, :]])
              for t in range(max_time_steps)]
    inputs = [
        np.vstack(
            [input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
        for t in range(max_time_steps)
    ]

    # batch_size length vector of sequence_lengths
    seq_lens = np.array([seq_len_0, seq_len_1], dtype=np.int32)
@ -130,21 +142,32 @@ class CTCGreedyDecoderTest(tf.test.TestCase):

    # decode_truth: one SparseTensor (ix, vals, shape)
    decode_truth = [
        (np.array([[0, 0],  # batch 0, 2 outputs
                   [0, 1],
                   [1, 0],  # batch 1, 3 outputs
                   [1, 1],
                   [1, 2]], dtype=np.int64),
         np.array([0, 1,  # batch 0
                   1, 1, 0],  # batch 1
                  dtype=np.int64),
         # shape is batch x max_decoded_length
         np.array([2, 3], dtype=np.int64)),
        (
            np.array(
                [
                    [0, 0],  # batch 0, 2 outputs
                    [0, 1],
                    [1, 0],  # batch 1, 3 outputs
                    [1, 1],
                    [1, 2]
                ],
                dtype=np.int64),
            np.array(
                [
                    0,
                    1,  # batch 0
                    1,
                    1,
                    0
                ],  # batch 1
                dtype=np.int64),
            # shape is batch x max_decoded_length
            np.array(
                [2, 3], dtype=np.int64)),
    ]

    self._testCTCDecoder(
        tf.nn.ctc_greedy_decoder,
        inputs, seq_lens, log_prob_truth, decode_truth)
    self._testCTCDecoder(ctc_ops.ctc_greedy_decoder, inputs, seq_lens,
                         log_prob_truth, decode_truth)

  def testCTCDecoderBeamSearch(self):
    """Test one batch, two beams - hibernating beam search."""
@ -153,50 +176,60 @@ class CTCGreedyDecoderTest(tf.test.TestCase):

    seq_len_0 = 5
    input_prob_matrix_0 = np.asarray(
        [[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
         [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
         [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
         [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
         [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
         # Random entry added in at time=5
         [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],
        [
            [0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
            [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
            [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
            [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
            [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
            # Random entry added in at time=5
            [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]
        ],
        dtype=np.float32)
    # Add arbitrary offset - this is fine
    input_log_prob_matrix_0 = np.log(input_prob_matrix_0) + 2.0

    # len max_time_steps array of batch_size x depth matrices
    inputs = ([input_log_prob_matrix_0[t, :][np.newaxis, :]
               for t in range(seq_len_0)]  # Pad to max_time_steps = 8
              + 2 * [np.zeros((1, depth), dtype=np.float32)])
    inputs = ([
        input_log_prob_matrix_0[t, :][np.newaxis, :] for t in range(seq_len_0)
    ]  # Pad to max_time_steps = 8
              + 2 * [np.zeros(
                  (1, depth), dtype=np.float32)])

    # batch_size length vector of sequence_lengths
    seq_lens = np.array([seq_len_0], dtype=np.int32)

    # batch_size length vector of negative log probabilities
    log_prob_truth = np.array([
        0.584855,  # output beam 0
        0.389139  # output beam 1
    ], np.float32)[np.newaxis, :]
    log_prob_truth = np.array(
        [
            0.584855,  # output beam 0
            0.389139  # output beam 1
        ],
        np.float32)[np.newaxis, :]

    # decode_truth: two SparseTensors, (ix, values, shape)
    decode_truth = [
        # beam 0, batch 0, two outputs decoded
        (np.array([[0, 0], [0, 1]], dtype=np.int64),
         np.array([1, 0], dtype=np.int64),
         np.array([1, 2], dtype=np.int64)),
        (np.array(
            [[0, 0], [0, 1]], dtype=np.int64), np.array(
                [1, 0], dtype=np.int64), np.array(
                    [1, 2], dtype=np.int64)),
        # beam 1, batch 0, three outputs decoded
        (np.array([[0, 0], [0, 1], [0, 2]], dtype=np.int64),
         np.array([0, 1, 0], dtype=np.int64),
         np.array([1, 3], dtype=np.int64)),
        (np.array(
            [[0, 0], [0, 1], [0, 2]], dtype=np.int64), np.array(
                [0, 1, 0], dtype=np.int64), np.array(
                    [1, 3], dtype=np.int64)),
    ]

    self._testCTCDecoder(
        tf.nn.ctc_beam_search_decoder,
        inputs, seq_lens, log_prob_truth,
        ctc_ops.ctc_beam_search_decoder,
        inputs,
        seq_lens,
        log_prob_truth,
        decode_truth,
        beam_width=2,
        top_paths=2)


if __name__ == "__main__":
  tf.test.main()
  test.main()
@ -12,14 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.ctc_ops.ctc_decoder_ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import ctc_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test


def SimpleSparseTensorFrom(x):
@ -37,27 +43,31 @@ def SimpleSparseTensorFrom(x):
    for time, val in enumerate(batch):
      x_ix.append([batch_i, time])
      x_val.append(val)
  x_shape = [len(x), np.asarray(x_ix).max(0)[1]+1]
  x_ix = tf.constant(x_ix, tf.int64)
  x_val = tf.constant(x_val, tf.int32)
  x_shape = tf.constant(x_shape, tf.int64)
  x_shape = [len(x), np.asarray(x_ix).max(0)[1] + 1]
  x_ix = constant_op.constant(x_ix, dtypes.int64)
  x_val = constant_op.constant(x_val, dtypes.int32)
  x_shape = constant_op.constant(x_shape, dtypes.int64)

  return tf.SparseTensor(x_ix, x_val, x_shape)
  return sparse_tensor.SparseTensor(x_ix, x_val, x_shape)


class CTCLossTest(tf.test.TestCase):
class CTCLossTest(test.TestCase):

  def _testCTCLoss(self, inputs, seq_lens, labels,
                   loss_truth, grad_truth, expected_err_re=None):
  def _testCTCLoss(self,
                   inputs,
                   seq_lens,
                   labels,
                   loss_truth,
                   grad_truth,
                   expected_err_re=None):
    self.assertEquals(len(inputs), len(grad_truth))

    inputs_t = tf.constant(inputs)
    inputs_t = constant_op.constant(inputs)

    with self.test_session(use_gpu=False) as sess:
      loss = tf.nn.ctc_loss(inputs=inputs_t,
                            labels=labels,
                            sequence_length=seq_lens)
      grad = tf.gradients(loss, [inputs_t])[0]
      loss = ctc_ops.ctc_loss(
          inputs=inputs_t, labels=labels, sequence_length=seq_lens)
      grad = gradients_impl.gradients(loss, [inputs_t])[0]

      self.assertShapeEqual(loss_truth, loss)
      self.assertShapeEqual(grad_truth, grad)
@ -176,9 +186,11 @@ class CTCLossTest(tf.test.TestCase):
        dtype=np.float32)

    # len max_time_steps array of 2 x depth matrices
    inputs = [np.vstack([input_log_prob_matrix_0[t, :],
                         input_log_prob_matrix_1[t, :]])
              for t in range(5)] + 2 * [np.nan*np.ones((2, depth), np.float32)]
    inputs = [
        np.vstack(
            [input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
        for t in range(5)
    ] + 2 * [np.nan * np.ones((2, depth), np.float32)]

    # convert inputs into [max_time x batch_size x depth tensor] Tensor
    inputs = np.asarray(inputs, dtype=np.float32)
@ -193,44 +205,45 @@ class CTCLossTest(tf.test.TestCase):
    loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)

    # output: len max_time_steps array of 2 x depth matrices
    grad_truth = [np.vstack([gradient_log_prob_0[t, :],
                             gradient_log_prob_1[t, :]])
                  for t in range(5)] + 2 * [np.zeros((2, depth), np.float32)]
    grad_truth = [
        np.vstack([gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
        for t in range(5)
    ] + 2 * [np.zeros((2, depth), np.float32)]

    # convert grad_truth into [max_time x batch_size x depth] Tensor
    grad_truth = np.asarray(grad_truth, dtype=np.float32)

    self._testCTCLoss(inputs, seq_lens, labels, loss_truth, grad_truth)


  def test_time_major(self):
    """Testing time_major param.

    testing if transposing and setting time_major=False will result in the same loss


    testing if transposing and setting time_major=False will result in the same
    loss
    """
    # [max_time x batch_size x depth tensor]
    inputs = np.random.randn(2, 2, 3).astype(np.float32)
    labels = SimpleSparseTensorFrom([[0, 1], [1, 0]])
    seq_lens = np.array([2, 2], dtype=np.int32)


    inputs_t = tf.constant(inputs)
    inputs_t = constant_op.constant(inputs)

    # Transposing tensor to [batch_size x max_time x depth tensor]
    inputs_t_transposed = tf.constant(inputs.transpose(1, 0, 2))

    inputs_t_transposed = constant_op.constant(inputs.transpose(1, 0, 2))

    with self.test_session(use_gpu=False) as sess:
      loss = tf.nn.ctc_loss(inputs=inputs_t,
                            labels=labels,
                            sequence_length=seq_lens)
      loss_transposed = tf.nn.ctc_loss(inputs=inputs_t_transposed,
                                       labels=labels,
                                       sequence_length=seq_lens, time_major=False)
      loss = ctc_ops.ctc_loss(
          inputs=inputs_t, labels=labels, sequence_length=seq_lens)
      loss_transposed = ctc_ops.ctc_loss(
          inputs=inputs_t_transposed,
          labels=labels,
          sequence_length=seq_lens,
          time_major=False)

      (tf_loss, tf_loss_transposed) = sess.run([loss, loss_transposed])
      self.assertAllEqual(tf_loss, tf_loss_transposed)


if __name__ == "__main__":
  tf.test.main()
  test.main()
File diff suppressed because it is too large
@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for DecodeCSV op from parsing_ops."""

from __future__ import absolute_import
@ -20,14 +19,16 @@ from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test


class DecodeCSVOpTest(tf.test.TestCase):
class DecodeCSVOpTest(test.TestCase):

  def _test(self, args, expected_out=None, expected_err_re=None):
    with self.test_session() as sess:
      decode = tf.decode_csv(**args)
      decode = parsing_ops.decode_csv(**args)

      if expected_err_re is None:
        out = sess.run(decode)
@ -43,7 +44,10 @@ class DecodeCSVOpTest(tf.test.TestCase):
        sess.run(decode)

  def testSimple(self):
    args = {"records": ["1", "2", '"3"'], "record_defaults": [[1]],}
    args = {
        "records": ["1", "2", '"3"'],
        "record_defaults": [[1]],
    }

    expected_out = [[1, 2, 3]]

@ -65,8 +69,8 @@ class DecodeCSVOpTest(tf.test.TestCase):
  def testInt64(self):
    args = {
        "records": ["1", "2", '"2147483648"'],
        "record_defaults": [np.array([],
                                     dtype=np.int64)],
        "record_defaults": [np.array(
            [], dtype=np.int64)],
    }

    expected_out = [[1, 2, 2147483648]]
@ -117,22 +121,22 @@ class DecodeCSVOpTest(tf.test.TestCase):
  def testWithoutDefaultsError(self):
    args = {
        "records": [",1", "0.2,3", "3.0,"],
        "record_defaults": [[1.0], np.array([],
                                            dtype=np.int32)]
        "record_defaults": [[1.0], np.array(
            [], dtype=np.int32)]
    }

    self._test(args,
               expected_err_re="Field 1 is required but missing in record 2!")
    self._test(
        args, expected_err_re="Field 1 is required but missing in record 2!")

  def testWrongFieldIntError(self):
    args = {
        "records": [",1", "0.2,234a", "3.0,2"],
        "record_defaults": [[1.0], np.array([],
                                            dtype=np.int32)]
        "record_defaults": [[1.0], np.array(
            [], dtype=np.int32)]
    }

    self._test(args,
               expected_err_re="Field 1 in record 1 is not a valid int32: 234a")
    self._test(
        args, expected_err_re="Field 1 in record 1 is not a valid int32: 234a")

  def testOutOfRangeError(self):
    args = {
@ -140,41 +144,39 @@ class DecodeCSVOpTest(tf.test.TestCase):
        "record_defaults": [[1]]
    }

    self._test(args,
               expected_err_re="Field 0 in record 1 is not a valid int32: ")
    self._test(
        args, expected_err_re="Field 0 in record 1 is not a valid int32: ")

  def testWrongFieldFloatError(self):
    args = {
        "records": [",1", "0.2,2", "3.0adf,3"],
        "record_defaults": [[1.0], np.array([],
                                            dtype=np.int32)]
        "record_defaults": [[1.0], np.array(
            [], dtype=np.int32)]
    }

    self._test(args,
               expected_err_re="Field 0 in record 2 is not a valid float: ")
    self._test(
        args, expected_err_re="Field 0 in record 2 is not a valid float: ")

  def testWrongFieldStringError(self):
    args = {"records": ['"1,a,"', "0.22", 'a"bc'], "record_defaults": [["a"]]}

    self._test(
        args,
        expected_err_re="Unquoted fields cannot have quotes/CRLFs inside")
        args, expected_err_re="Unquoted fields cannot have quotes/CRLFs inside")

  def testWrongDefaults(self):
    args = {
        "records": [",1", "0.2,2", "3.0adf,3"],
        "record_defaults": [[1.0]]
    }
    args = {"records": [",1", "0.2,2", "3.0adf,3"], "record_defaults": [[1.0]]}

    self._test(args,
               expected_err_re="Expect 1 fields but have 2 in record 0")
    self._test(args, expected_err_re="Expect 1 fields but have 2 in record 0")

  def testShortQuotedString(self):
    args = {"records": ["\""], "record_defaults": [["default"]],}
    args = {
        "records": ["\""],
        "record_defaults": [["default"]],
    }

    self._test(args,
               expected_err_re="Quoted field has to end with quote followed.*")
    self._test(
        args, expected_err_re="Quoted field has to end with quote followed.*")


if __name__ == "__main__":
  tf.test.main()
  test.main()
@ -19,13 +19,20 @@ from __future__ import division
from __future__ import print_function

import os.path

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test

# Double-quote usage here is intentional to make internal path rewriting easier.
prefix_path = os.path.join("tensorflow", "core", "lib")
prefix_path = os.path.join('third_party', 'tensorflow', 'core', 'lib')

class DecodeImageOpTest(tf.test.TestCase):

class DecodeImageOpTest(test.TestCase):

  def testGif(self):
    # Read some real GIFs
@ -36,9 +43,9 @@ class DecodeImageOpTest(tf.test.TestCase):
    shape = (12, HEIGHT, WIDTH, 3)

    with self.test_session(use_gpu=True) as sess:
      gif0 = tf.read_file(path)
      image0 = tf.image.decode_image(gif0)
      image1 = tf.image.decode_gif(gif0)
      gif0 = io_ops.read_file(path)
      image0 = image_ops.decode_image(gif0)
      image1 = image_ops.decode_gif(gif0)
      gif0, image0, image1 = sess.run([gif0, image0, image1])

      self.assertEqual(image0.shape, shape)
@ -57,18 +64,17 @@ class DecodeImageOpTest(tf.test.TestCase):

        self.assertAllClose(frame, gt)

      bad_channels = tf.image.decode_image(gif0, channels=1)
      with self.assertRaises(tf.errors.InvalidArgumentError):
      bad_channels = image_ops.decode_image(gif0, channels=1)
      with self.assertRaises(errors_impl.InvalidArgumentError):
        bad_channels.eval()


  def testJpeg(self):
    # Read a real jpeg and verify shape
    path = os.path.join(prefix_path, 'jpeg', 'testdata', 'jpeg_merge_test1.jpg')
    with self.test_session(use_gpu=True) as sess:
      jpeg0 = tf.read_file(path)
      image0 = tf.image.decode_image(jpeg0)
      image1 = tf.image.decode_jpeg(jpeg0)
      jpeg0 = io_ops.read_file(path)
      image0 = image_ops.decode_image(jpeg0)
      image1 = image_ops.decode_jpeg(jpeg0)
      jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
      self.assertEqual(len(jpeg0), 3771)
      self.assertEqual(image0.shape, (256, 128, 3))
@ -81,25 +87,25 @@ class DecodeImageOpTest(tf.test.TestCase):
    for channels in 0, 1, 3:
      with self.test_session(use_gpu=True) as sess:
        path = os.path.join(prefix_path, 'png', 'testdata', filename)
        png0 = tf.read_file(path)
        image0 = tf.image.decode_image(png0, channels=channels)
        image1 = tf.image.decode_png(png0, channels=channels)
        png0 = io_ops.read_file(path)
        image0 = image_ops.decode_image(png0, channels=channels)
        image1 = image_ops.decode_png(png0, channels=channels)
        png0, image0, image1 = sess.run([png0, image0, image1])
        self.assertEqual(image0.shape, (26, 51, channels or channels_in))
        self.assertAllEqual(image0, image1)

  def testInvalidBytes(self):
    image_bytes = b'ThisIsNotAnImage!'
    decode = tf.image.decode_image(image_bytes)
    decode = image_ops.decode_image(image_bytes)
    with self.test_session():
      with self.assertRaises(tf.errors.InvalidArgumentError):
      with self.assertRaises(errors_impl.InvalidArgumentError):
        decode.eval()

  def testInvalidChannels(self):
    image_bytes = b'unused'
    with self.assertRaises(ValueError):
      decode = tf.image.decode_image(image_bytes, channels=4)
      decode = image_ops.decode_image(image_bytes, channels=4)


if __name__ == "__main__":
  tf.test.main()
if __name__ == '__main__':
  test.main()
@ -18,25 +18,33 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test


class DecodePngOpTest(tf.test.TestCase):
class DecodePngOpTest(test.TestCase):

  def test16bit(self):
    img_bytes = [[0, 255], [1024, 1024 + 255]]
    # Encoded PNG bytes resulting from encoding the above img_bytes
    # using go's image/png encoder.
    encoded_bytes = [137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68,
                     82, 0, 0, 0, 2, 0, 0, 0, 2, 16, 0, 0, 0, 0, 7, 77, 142,
                     187, 0, 0, 0, 21, 73, 68, 65, 84, 120, 156, 98, 98, 96, 96,
                     248, 207, 194, 2, 36, 1, 1, 0, 0, 255, 255, 6, 60, 1, 10,
                     68, 160, 26, 131, 0, 0, 0, 0, 73, 69, 78, 68, 174, 66, 96,
                     130]
    encoded_bytes = [
        137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 0,
        2, 0, 0, 0, 2, 16, 0, 0, 0, 0, 7, 77, 142, 187, 0, 0, 0, 21, 73, 68, 65,
        84, 120, 156, 98, 98, 96, 96, 248, 207, 194, 2, 36, 1, 1, 0, 0, 255,
        255, 6, 60, 1, 10, 68, 160, 26, 131, 0, 0, 0, 0, 73, 69, 78, 68, 174,
        66, 96, 130
    ]

    byte_string = bytes(bytearray(encoded_bytes))
    img_in = tf.constant(byte_string, dtype=tf.string)
    decode = tf.squeeze(tf.image.decode_png(img_in, dtype=tf.uint16))
    img_in = constant_op.constant(byte_string, dtype=dtypes.string)
    decode = array_ops.squeeze(
        image_ops.decode_png(
            img_in, dtype=dtypes.uint16))

    with self.test_session():
      decoded = decode.eval()
@ -44,4 +52,4 @@ class DecodePngOpTest(tf.test.TestCase):


if __name__ == "__main__":
  tf.test.main()
  test.main()
@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for DecodeRaw op from parsing_ops."""

from __future__ import absolute_import
@ -20,15 +19,19 @@ from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test


class DecodeRawOpTest(tf.test.TestCase):
class DecodeRawOpTest(test.TestCase):

  def testToUint8(self):
    with self.test_session():
      in_bytes = tf.placeholder(tf.string, shape=[2])
      decode = tf.decode_raw(in_bytes, out_type=tf.uint8)
      in_bytes = array_ops.placeholder(dtypes.string, shape=[2])
      decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.uint8)
      self.assertEqual([2, None], decode.get_shape().as_list())

      result = decode.eval(feed_dict={in_bytes: ["A", "a"]})
@ -45,13 +48,13 @@ class DecodeRawOpTest(tf.test.TestCase):

  def testToInt16(self):
    with self.test_session():
      in_bytes = tf.placeholder(tf.string, shape=[None])
      decode = tf.decode_raw(in_bytes, out_type=tf.int16)
      in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
      decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.int16)
      self.assertEqual([None, None], decode.get_shape().as_list())

      result = decode.eval(feed_dict={in_bytes: ["AaBC"]})
      self.assertAllEqual([[ord("A") + ord("a") * 256,
                            ord("B") + ord("C") * 256]], result)
      self.assertAllEqual(
          [[ord("A") + ord("a") * 256, ord("B") + ord("C") * 256]], result)

      with self.assertRaisesOpError(
          "Input to DecodeRaw has length 3 that is not a multiple of 2, the "
@ -60,8 +63,8 @@ class DecodeRawOpTest(tf.test.TestCase):

  def testToFloat16(self):
    with self.test_session():
      in_bytes = tf.placeholder(tf.string, shape=[None])
      decode = tf.decode_raw(in_bytes, out_type=tf.float16)
      in_bytes = array_ops.placeholder(dtypes.string, shape=[None])
      decode = parsing_ops.decode_raw(in_bytes, out_type=dtypes.float16)
      self.assertEqual([None, None], decode.get_shape().as_list())

      expected_result = np.matrix([[1, -2, -3, 4]], dtype=np.float16)
@ -69,5 +72,6 @@ class DecodeRawOpTest(tf.test.TestCase):

      self.assertAllEqual(expected_result, result)


if __name__ == "__main__":
  tf.test.main()
  test.main()
@ -12,19 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for denormal handling."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import control_imports
from tensorflow.python.platform import test


class DenormalTest(tf.test.TestCase):
class DenormalTest(test.TestCase):

  def testPythonHasDenormals(self):
    """Non-tf numpy code should treat denormals correctly."""
@ -37,12 +39,12 @@ class DenormalTest(tf.test.TestCase):
      # TODO(irving): Fix denormal flushing for open source.
      return
    with self.test_session(use_gpu=use_gpu):
      tf.identity(7).eval()
      array_ops.identity(7).eval()
      for dtype in dtypes:
        tiny = np.finfo(dtype).tiny
        # Small shape to test main thread, large shape to test thread pool
        for shape in (), (1<<20,):
          flush = 0.1 * tf.constant(tiny, shape=shape)
        for shape in (), (1 << 20,):
          flush = 0.1 * constant_op.constant(tiny, shape=shape)
          self.assertAllEqual(flush.eval(), np.zeros(shape))
      # Make sure the flags don't leak out
      self.testPythonHasDenormals()
@ -57,4 +59,4 @@ class DenormalTest(tf.test.TestCase):


if __name__ == "__main__":
  tf.test.main()
  test.main()
|
||||
|
@ -12,33 +12,43 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for state updating ops that may have benign race conditions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class AssignOpTest(tf.test.TestCase):
class AssignOpTest(test.TestCase):

  # NOTE(mrry): We exclude thess tests from the TSAN TAP target, because they
  # contain benign and deliberate data races when multiple threads update
  # the same parameters without a lock.
  def testParallelUpdateWithoutLocking(self):
    with self.test_session() as sess:
      ones_t = tf.fill([1024, 1024], 1.0)
      p = tf.Variable(tf.zeros([1024, 1024]))
      adds = [tf.assign_add(p, ones_t, use_locking=False)
              for _ in range(20)]
      tf.global_variables_initializer().run()
      ones_t = array_ops.fill([1024, 1024], 1.0)
      p = variables.Variable(array_ops.zeros([1024, 1024]))
      adds = [
          state_ops.assign_add(
              p, ones_t, use_locking=False) for _ in range(20)
      ]
      variables.global_variables_initializer().run()

      def run_add(add_op):
        sess.run(add_op)
      threads = [self.checkedThread(target=run_add, args=(add_op,))
                 for add_op in adds]

      threads = [
          self.checkedThread(
              target=run_add, args=(add_op,)) for add_op in adds
      ]
      for t in threads:
        t.start()
      for t in threads:
@ -51,16 +61,21 @@ class AssignOpTest(tf.test.TestCase):

  def testParallelAssignWithoutLocking(self):
    with self.test_session() as sess:
      ones_t = tf.fill([1024, 1024], float(1))
      p = tf.Variable(tf.zeros([1024, 1024]))
      assigns = [tf.assign(p, tf.mul(ones_t, float(i)), False)
                 for i in range(1, 21)]
      tf.global_variables_initializer().run()
      ones_t = array_ops.fill([1024, 1024], float(1))
      p = variables.Variable(array_ops.zeros([1024, 1024]))
      assigns = [
          state_ops.assign(p, math_ops.mul(ones_t, float(i)), False)
          for i in range(1, 21)
      ]
      variables.global_variables_initializer().run()

      def run_assign(assign_op):
        sess.run(assign_op)
      threads = [self.checkedThread(target=run_assign, args=(assign_op,))
                 for assign_op in assigns]

      threads = [
          self.checkedThread(
              target=run_assign, args=(assign_op,)) for assign_op in assigns
      ]
      for t in threads:
        t.start()
      for t in threads:
@ -74,4 +89,4 @@ class AssignOpTest(tf.test.TestCase):


if __name__ == "__main__":
  tf.test.main()
  test.main()

@ -12,24 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.ops.tf.Assign*."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class AssignOpTest(tf.test.TestCase):
class AssignOpTest(test.TestCase):

  def _initAssignFetch(self, x, y, use_gpu=False):
    """Initialize a param to init and update it with y."""
    super(AssignOpTest, self).setUp()
    with self.test_session(use_gpu=use_gpu):
      p = tf.Variable(x)
      assign = tf.assign(p, y)
      p = variables.Variable(x)
      assign = state_ops.assign(p, y)
      p.initializer.run()
      new_value = assign.eval()
      return p.eval(), new_value
@ -37,8 +43,8 @@ class AssignOpTest(tf.test.TestCase):
  def _initAssignAddFetch(self, x, y, use_gpu=False):
    """Initialize a param to init, and compute param += y."""
    with self.test_session(use_gpu=use_gpu):
      p = tf.Variable(x)
      add = tf.assign_add(p, y)
      p = variables.Variable(x)
      add = state_ops.assign_add(p, y)
      p.initializer.run()
      new_value = add.eval()
      return p.eval(), new_value
@ -46,8 +52,8 @@ class AssignOpTest(tf.test.TestCase):
  def _initAssignSubFetch(self, x, y, use_gpu=False):
    """Initialize a param to init, and compute param -= y."""
    with self.test_session(use_gpu=use_gpu):
      p = tf.Variable(x)
      sub = tf.assign_sub(p, y)
      p = variables.Variable(x)
      sub = state_ops.assign_sub(p, y)
      p.initializer.run()
      new_value = sub.eval()
      return p.eval(), new_value
@ -65,7 +71,7 @@ class AssignOpTest(tf.test.TestCase):
      var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
      self.assertAllEqual(x - y, var_value)
      self.assertAllEqual(x - y, op_value)
      if tf.test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
      if test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
        var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
        self.assertAllEqual(y, var_value)
        self.assertAllEqual(y, op_value)
@ -81,31 +87,29 @@ class AssignOpTest(tf.test.TestCase):

  def testAssignNonStrictShapeChecking(self):
    with self.test_session():
      data = tf.fill([1024, 1024], 0)
      p = tf.Variable([1])
      a = tf.assign(p, data, validate_shape=False)
      data = array_ops.fill([1024, 1024], 0)
      p = variables.Variable([1])
      a = state_ops.assign(p, data, validate_shape=False)
      a.op.run()
      self.assertAllEqual(p.eval(), data.eval())

      # Assign to yet another shape
      data2 = tf.fill([10, 10], 1)
      a2 = tf.assign(p, data2, validate_shape=False)
      data2 = array_ops.fill([10, 10], 1)
      a2 = state_ops.assign(p, data2, validate_shape=False)
      a2.op.run()
      self.assertAllEqual(p.eval(), data2.eval())

  def testInitRequiredAssignAdd(self):
    with self.test_session():
      p = tf.Variable(tf.fill([1024, 1024], 1),
                      tf.int32)
      a = tf.assign_add(p, tf.fill([1024, 1024], 0))
      p = variables.Variable(array_ops.fill([1024, 1024], 1), dtypes.int32)
      a = state_ops.assign_add(p, array_ops.fill([1024, 1024], 0))
      with self.assertRaisesOpError("use uninitialized"):
        a.op.run()

  def testInitRequiredAssignSub(self):
    with self.test_session():
      p = tf.Variable(tf.fill([1024, 1024], 1),
                      tf.int32)
      a = tf.assign_sub(p, tf.fill([1024, 1024], 0))
      p = variables.Variable(array_ops.fill([1024, 1024], 1), dtypes.int32)
      a = state_ops.assign_sub(p, array_ops.fill([1024, 1024], 0))
      with self.assertRaisesOpError("use uninitialized"):
        a.op.run()

@ -114,17 +118,22 @@ class AssignOpTest(tf.test.TestCase):
  # data race and must run without TSAN.
  def testParallelUpdateWithLocking(self):
    with self.test_session() as sess:
      zeros_t = tf.fill([1024, 1024], 0.0)
      ones_t = tf.fill([1024, 1024], 1.0)
      p = tf.Variable(zeros_t)
      adds = [tf.assign_add(p, ones_t, use_locking=True)
              for _ in range(20)]
      zeros_t = array_ops.fill([1024, 1024], 0.0)
      ones_t = array_ops.fill([1024, 1024], 1.0)
      p = variables.Variable(zeros_t)
      adds = [
          state_ops.assign_add(
              p, ones_t, use_locking=True) for _ in range(20)
      ]
      p.initializer.run()

      def run_add(add_op):
        sess.run(add_op)

      threads = [
          self.checkedThread(target=run_add, args=(add_op,)) for add_op in adds]
          self.checkedThread(
              target=run_add, args=(add_op,)) for add_op in adds
      ]
      for t in threads:
        t.start()
      for t in threads:
@ -139,18 +148,23 @@ class AssignOpTest(tf.test.TestCase):
  # which contains a benign data race and must run without TSAN.
  def testParallelAssignWithLocking(self):
    with self.test_session() as sess:
      zeros_t = tf.fill([1024, 1024], 0.0)
      ones_t = tf.fill([1024, 1024], 1.0)
      p = tf.Variable(zeros_t)
      assigns = [tf.assign(p, tf.mul(ones_t, float(i)),
                           use_locking=True)
                 for i in range(1, 21)]
      zeros_t = array_ops.fill([1024, 1024], 0.0)
      ones_t = array_ops.fill([1024, 1024], 1.0)
      p = variables.Variable(zeros_t)
      assigns = [
          state_ops.assign(
              p, math_ops.mul(ones_t, float(i)), use_locking=True)
          for i in range(1, 21)
      ]
      p.initializer.run()

      def run_assign(assign_op):
        sess.run(assign_op)
      threads = [self.checkedThread(target=run_assign, args=(assign_op,))
                 for assign_op in assigns]

      threads = [
          self.checkedThread(
              target=run_assign, args=(assign_op,)) for assign_op in assigns
      ]
      for t in threads:
        t.start()
      for t in threads:
@ -165,4 +179,4 @@ class AssignOpTest(tf.test.TestCase):


if __name__ == "__main__":
  tf.test.main()
  test.main()

@ -20,14 +20,20 @@ from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class DepthToSpaceTest(tf.test.TestCase):
class DepthToSpaceTest(test.TestCase):

  def _testOne(self, inputs, block_size, outputs):
    with self.test_session(use_gpu=True):
      x_tf = tf.depth_to_space(tf.to_float(inputs), block_size)
      x_tf = array_ops.depth_to_space(math_ops.to_float(inputs), block_size)
      self.assertAllEqual(x_tf.eval(), outputs)

  def testBasic(self):
@ -137,7 +143,7 @@ class DepthToSpaceTest(tf.test.TestCase):
    # Raise an exception, since th depth is only 4 and needs to be
    # divisible by 16.
    with self.assertRaises(ValueError):
      out_tf = tf.depth_to_space(x_np, block_size)
      out_tf = array_ops.depth_to_space(x_np, block_size)
      out_tf.eval()

    # Test when the block size is 0.
@ -146,7 +152,7 @@ class DepthToSpaceTest(tf.test.TestCase):
             [[3], [4]]]]
    block_size = 0
    with self.assertRaises(ValueError):
      out_tf = tf.depth_to_space(x_np, block_size)
      out_tf = array_ops.depth_to_space(x_np, block_size)
      out_tf.eval()

    # Test when the block size is 1. The block size should be > 1.
@ -157,7 +163,7 @@ class DepthToSpaceTest(tf.test.TestCase):
              [4, 4, 4, 4]]]]
    block_size = 1
    with self.assertRaises(ValueError):
      out_tf = tf.depth_to_space(x_np, block_size)
      out_tf = array_ops.depth_to_space(x_np, block_size)
      out_tf.eval()

  def testBlockSizeLargerThanInput(self):
@ -166,7 +172,7 @@ class DepthToSpaceTest(tf.test.TestCase):
             [[3], [4]]]]
    block_size = 10
    with self.assertRaises(ValueError):
      out_tf = tf.space_to_depth(x_np, block_size)
      out_tf = array_ops.space_to_depth(x_np, block_size)
      out_tf.eval()

  def testBlockSizeNotDivisibleDepth(self):
@ -177,23 +183,23 @@ class DepthToSpaceTest(tf.test.TestCase):
              [4, 4, 4, 4]]]]
    block_size = 3
    with self.assertRaises(ValueError):
      _ = tf.space_to_depth(x_np, block_size)
      _ = array_ops.space_to_depth(x_np, block_size)

  def testUnknownShape(self):
    t = tf.depth_to_space(tf.placeholder(tf.float32), block_size=4)
    t = array_ops.depth_to_space(array_ops.placeholder(dtypes.float32), block_size=4)
    self.assertEqual(4, t.get_shape().ndims)


class DepthToSpaceGradientTest(tf.test.TestCase):
class DepthToSpaceGradientTest(test.TestCase):

  # Check the gradients.
  def _checkGrad(self, x, block_size):
    assert 4 == x.ndim
    with self.test_session(use_gpu=True):
      tf_x = tf.convert_to_tensor(x)
      tf_y = tf.depth_to_space(tf_x, block_size)
      tf_x = ops.convert_to_tensor(x)
      tf_y = array_ops.depth_to_space(tf_x, block_size)
      epsilon = 1e-2
      ((x_jacob_t, x_jacob_n)) = tf.test.compute_gradient(
      ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
@ -225,4 +231,4 @@ class DepthToSpaceGradientTest(tf.test.TestCase):


if __name__ == "__main__":
  tf.test.main()
  test.main()

@ -13,12 +13,20 @@
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
from tensorflow.python.platform import test


def ConfigsToTest():
@ -72,7 +80,7 @@ def CheckGradConfigsToTest():
    yield i, f, o, s, p


class DepthwiseConv2DTest(tf.test.TestCase):
class DepthwiseConv2DTest(test.TestCase):

  # This is testing against the output of the implementation using the
  # combination of conv_2d and slicing ops.
@ -100,19 +108,14 @@ class DepthwiseConv2DTest(tf.test.TestCase):
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with self.test_session(use_gpu=use_gpu) as sess:
      t1 = tf.constant(x1, shape=tensor_in_sizes)
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t1.set_shape(tensor_in_sizes)
      t2 = tf.constant(x2, shape=filter_in_sizes)
      conv_native = tf.nn.depthwise_conv2d_native(
          t1,
          t2,
          strides=[1, stride, stride, 1],
          padding=padding)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      conv_native = nn_ops.depthwise_conv2d_native(
          t1, t2, strides=[1, stride, stride, 1], padding=padding)

      conv_gold = tf.nn.depthwise_conv2d(t1,
                                         t2,
                                         strides=[1, stride, stride, 1],
                                         padding=padding)
      conv_gold = nn_impl.depthwise_conv2d(
          t1, t2, strides=[1, stride, stride, 1], padding=padding)
      native_result = sess.run(conv_native)
      gold_result = sess.run(conv_gold)

@ -127,18 +130,13 @@ class DepthwiseConv2DTest(tf.test.TestCase):
                padding) in enumerate(ConfigsToTest()):
      print("Processing ", index, "th config.")
      if index == 2:
        self._VerifyValues(input_size,
                           filter_size,
                           stride,
                           padding,
                           use_gpu=True)
        self._VerifyValues(input_size,
                           filter_size,
                           stride,
                           padding,
                           use_gpu=False)
        self._VerifyValues(
            input_size, filter_size, stride, padding, use_gpu=True)
        self._VerifyValues(
            input_size, filter_size, stride, padding, use_gpu=False)

  # This is testing against hand calculated results.

  def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                        expected, use_gpu):
    """Verifies the output values of the depthwise convolution function.
@ -164,13 +162,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with self.test_session(use_gpu=use_gpu) as sess:
      t1 = tf.constant(x1, shape=tensor_in_sizes)
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t1.set_shape(tensor_in_sizes)
      t2 = tf.constant(x2, shape=filter_in_sizes)
      conv = tf.nn.depthwise_conv2d_native(t1,
                                           t2,
                                           strides=[1, stride, stride, 1],
                                           padding=padding)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      conv = nn_ops.depthwise_conv2d_native(
          t1, t2, strides=[1, stride, stride, 1], padding=padding)
      value = sess.run(conv)
    print("value = ", value)
    self.assertArrayNear(expected, np.ravel(value), 1e-5)
@ -226,19 +222,21 @@ class DepthwiseConv2DTest(tf.test.TestCase):
    # (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
    # 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
    expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
    self._VerifyHandValues(tensor_in_sizes=[1, 2, 3, 2],
                           filter_in_sizes=[2, 2, 2, 2],
                           stride=1,
                           padding="VALID",
                           expected=expected_output,
                           use_gpu=False)
    self._VerifyHandValues(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        stride=1,
        padding="VALID",
        expected=expected_output,
        use_gpu=False)

    self._VerifyHandValues(tensor_in_sizes=[1, 2, 3, 2],
                           filter_in_sizes=[2, 2, 2, 2],
                           stride=1,
                           padding="VALID",
                           expected=expected_output,
                           use_gpu=True)
    self._VerifyHandValues(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        stride=1,
        padding="VALID",
        expected=expected_output,
        use_gpu=True)

  # Gradient checkers.This tests depthwise gradient computations for both
  # BackpropFilter and BackpropInput by comparing gradients computed by the
@ -257,32 +255,31 @@ class DepthwiseConv2DTest(tf.test.TestCase):
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
    with self.test_session(use_gpu=use_gpu):
      if data_type == tf.float32:
      if data_type == dtypes.float32:
        tolerance = 0.002
      else:
        self.assertEqual(data_type, tf.float64)
        self.assertEqual(data_type, dtypes.float64)
        tolerance = 1e-8

      input_tensor = tf.constant(input_data,
                                 shape=input_shape,
                                 dtype=data_type,
                                 name="input")
      filter_tensor = tf.constant(filter_data,
                                  shape=filter_shape,
                                  dtype=data_type,
                                  name="filter")
      depthwise_conv2d = tf.nn.depthwise_conv2d_native(input_tensor,
                                                       filter_tensor,
                                                       [1, stride, stride, 1],
                                                       padding,
                                                       name="depthwise_conv2d")
      input_tensor = constant_op.constant(
          input_data, shape=input_shape, dtype=data_type, name="input")
      filter_tensor = constant_op.constant(
          filter_data, shape=filter_shape, dtype=data_type, name="filter")
      depthwise_conv2d = nn_ops.depthwise_conv2d_native(
          input_tensor,
          filter_tensor, [1, stride, stride, 1],
          padding,
          name="depthwise_conv2d")
      self.assertEqual(output_shape, depthwise_conv2d.get_shape())
      if test_input:
        err = tf.test.compute_gradient_error(input_tensor, input_shape,
                                             depthwise_conv2d, output_shape)
        err = gradient_checker.compute_gradient_error(input_tensor, input_shape,
                                                      depthwise_conv2d,
                                                      output_shape)
      else:
        err = tf.test.compute_gradient_error(filter_tensor, filter_shape,
                                             depthwise_conv2d, output_shape)
        err = gradient_checker.compute_gradient_error(filter_tensor,
                                                      filter_shape,
                                                      depthwise_conv2d,
                                                      output_shape)
      print("depthwise conv_2d gradient error = ", err)
      self.assertLess(err, tolerance)

@ -291,28 +288,30 @@ class DepthwiseConv2DTest(tf.test.TestCase):
                padding) in enumerate(CheckGradConfigsToTest()):
      print("Processing ", index, "th config.")
      for use_gpu in [True, False]:
        self._ConstructAndTestGradient(input_size,
                                       filter_size,
                                       output_size,
                                       stride,
                                       padding,
                                       tf.float32,
                                       test_input=True,
                                       use_gpu=use_gpu)
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            dtypes.float32,
            test_input=True,
            use_gpu=use_gpu)

  def testDepthwiseConv2DFilterGrad(self):
    for index, (input_size, filter_size, output_size, stride,
                padding) in enumerate(CheckGradConfigsToTest()):
      print("Processing ", index, "th config.")
      for use_gpu in [True, False]:
        self._ConstructAndTestGradient(input_size,
                                       filter_size,
                                       output_size,
                                       stride,
                                       padding,
                                       tf.float32,
                                       test_input=False,
                                       use_gpu=use_gpu)
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            dtypes.float32,
            test_input=False,
            use_gpu=use_gpu)

  def _CompareBackpropInputFloat(self, input_sizes, filter_sizes, output_sizes,
                                 stride, padding):
@ -321,15 +320,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):

    def _GetVal(use_gpu):
      with self.test_session(use_gpu=use_gpu):
        t0 = tf.constant(input_sizes, shape=[len(input_sizes)])
        t1 = tf.constant(x1, shape=filter_sizes)
        t2 = tf.constant(x2, shape=output_sizes)
        backprop = tf.nn.depthwise_conv2d_native_backprop_input(
            t0,
            t1,
            t2,
            strides=[1, stride, stride, 1],
            padding=padding)
        t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
        t1 = constant_op.constant(x1, shape=filter_sizes)
        t2 = constant_op.constant(x2, shape=output_sizes)
        backprop = nn_ops.depthwise_conv2d_native_backprop_input(
            t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = backprop.eval()
        self.assertShapeEqual(ret, backprop)
        return ret
@ -345,15 +340,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):

    def _GetVal(use_gpu):
      with self.test_session(use_gpu=use_gpu):
        t0 = tf.constant(input_sizes, shape=[len(input_sizes)])
        t1 = tf.constant(x1, shape=filter_sizes)
        t2 = tf.constant(x2, shape=output_sizes)
        backprop = tf.nn.depthwise_conv2d_native_backprop_input(
            t0,
            t1,
            t2,
            strides=[1, stride, stride, 1],
            padding=padding)
        t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
        t1 = constant_op.constant(x1, shape=filter_sizes)
        t2 = constant_op.constant(x2, shape=output_sizes)
        backprop = nn_ops.depthwise_conv2d_native_backprop_input(
            t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = backprop.eval()
        self.assertShapeEqual(ret, backprop)
        return ret
@ -378,15 +369,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):

    def _GetVal(use_gpu):
      with self.test_session(use_gpu=use_gpu):
        t0 = tf.constant(x0, shape=input_sizes)
        t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = tf.constant(x2, shape=output_sizes)
        backprop = tf.nn.depthwise_conv2d_native_backprop_filter(
            t0,
            t1,
            t2,
            strides=[1, stride, stride, 1],
            padding=padding)
        t0 = constant_op.constant(x0, shape=input_sizes)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes)
        backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
            t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = backprop.eval()
        self.assertShapeEqual(ret, backprop)
        return ret
@ -402,15 +389,11 @@ class DepthwiseConv2DTest(tf.test.TestCase):

    def _GetVal(use_gpu):
      with self.test_session(use_gpu=use_gpu):
        t0 = tf.constant(x0, shape=input_sizes)
        t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = tf.constant(x2, shape=output_sizes)
        backprop = tf.nn.depthwise_conv2d_native_backprop_filter(
            t0,
            t1,
            t2,
            strides=[1, stride, stride, 1],
            padding=padding)
        t0 = constant_op.constant(x0, shape=input_sizes)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes)
        backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
            t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = backprop.eval()
        self.assertShapeEqual(ret, backprop)
        return ret
@ -428,6 +411,5 @@ class DepthwiseConv2DTest(tf.test.TestCase):
    self._CompareBackpropFilterDouble(input_size, filter_size, output_size,
                                      stride, padding)


if __name__ == "__main__":
  tf.test.main()
  test.main()

@ -12,17 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for tensorflow.ops.tf.MatrixDeterminant."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import test


class DeterminantOpTest(tf.test.TestCase):
class DeterminantOpTest(test.TestCase):

  def _compareDeterminantBase(self, matrix_x, tf_ans):
    out = tf_ans.eval()
@ -36,16 +39,18 @@ class DeterminantOpTest(tf.test.TestCase):

  def _compareDeterminant(self, matrix_x):
    with self.test_session():
      self._compareDeterminantBase(matrix_x, tf.matrix_determinant(matrix_x))
      self._compareDeterminantBase(matrix_x,
                                   linalg_ops.matrix_determinant(matrix_x))

  def testBasic(self):
    # 2x2 matrices
    self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float32))
    self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float32))
    # 5x5 matrices (Eigen forces LU decomposition)
    self._compareDeterminant(np.array(
        [[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [2., 5., 8., 3., 8.],
         [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float32))
    self._compareDeterminant(
        np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
            2., 5., 8., 3., 8.
        ], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float32))
    # A multidimensional batch of 2x2 matrices
    self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float32))

@ -54,9 +59,10 @@ class DeterminantOpTest(tf.test.TestCase):
    self._compareDeterminant(np.array([[2., 3.], [3., 4.]]).astype(np.float64))
    self._compareDeterminant(np.array([[0., 0.], [0., 0.]]).astype(np.float64))
    # 5x5 matrices (Eigen forces LU decomposition)
    self._compareDeterminant(np.array(
        [[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [2., 5., 8., 3., 8.],
         [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float64))
    self._compareDeterminant(
        np.array([[2., 3., 4., 5., 6.], [3., 4., 9., 2., 0.], [
            2., 5., 8., 3., 8.
        ], [1., 6., 7., 4., 7.], [2., 3., 4., 5., 6.]]).astype(np.float64))
    # A multidimensional batch of 2x2 matrices
    self._compareDeterminant(np.random.rand(3, 4, 5, 2, 2).astype(np.float64))

@ -70,14 +76,14 @@ class DeterminantOpTest(tf.test.TestCase):
    # When the determinant of a non-square matrix is attempted we should return
    # an error
    with self.assertRaises(ValueError):
      tf.matrix_determinant(
      linalg_ops.matrix_determinant(
          np.array([[1., 2., 3.], [3., 5., 4.]]).astype(np.float32))

  def testWrongDimensions(self):
    # The input to the determinant should be a 2-dimensional tensor.
    tensor1 = tf.constant([1., 2.])
    tensor1 = constant_op.constant([1., 2.])
    with self.assertRaises(ValueError):
      tf.matrix_determinant(tensor1)
      linalg_ops.matrix_determinant(tensor1)

  def testEmpty(self):
    self._compareDeterminant(np.empty([0, 2, 2]))
@ -85,4 +91,4 @@ class DeterminantOpTest(tf.test.TestCase):


if __name__ == "__main__":
  tf.test.main()
  test.main()

@ -18,17 +18,25 @@ from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


class MatrixDiagTest(tf.test.TestCase):
class MatrixDiagTest(test.TestCase):
  _use_gpu = False

  def testVector(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.diag(v)
      v_diag = tf.matrix_diag(v)
      v_diag = array_ops.matrix_diag(v)
      self.assertEqual((3, 3), v_diag.get_shape())
      self.assertAllEqual(v_diag.eval(), mat)

@ -43,28 +51,30 @@ class MatrixDiagTest(tf.test.TestCase):
                            [[4.0, 0.0, 0.0],
                             [0.0, 5.0, 0.0],
                             [0.0, 0.0, 6.0]]])
      v_batch_diag = tf.matrix_diag(v_batch)
      v_batch_diag = array_ops.matrix_diag(v_batch)
      self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
      self.assertAllEqual(v_batch_diag.eval(), mat_batch)

  def testInvalidShape(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
      tf.matrix_diag(0)
      array_ops.matrix_diag(0)

  def testInvalidShapeAtEval(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = tf.placeholder(dtype=tf.float32)
      v = array_ops.placeholder(dtype=dtypes_lib.float32)
      with self.assertRaisesOpError("input must be at least 1-dim"):
        tf.matrix_diag(v).eval(feed_dict={v: 0.0})
        array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})

  def testGrad(self):
    shapes = ((3,), (7, 4))
    with self.test_session(use_gpu=self._use_gpu):
      for shape in shapes:
        x = tf.constant(np.random.rand(*shape), np.float32)
        y = tf.matrix_diag(x)
        error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                               y, y.get_shape().as_list())
        x = constant_op.constant(np.random.rand(*shape), np.float32)
        y = array_ops.matrix_diag(x)
        error = gradient_checker.compute_gradient_error(x,
                                                        x.get_shape().as_list(),
                                                        y,
                                                        y.get_shape().as_list())
        self.assertLess(error, 1e-4)


@ -72,7 +82,7 @@ class MatrixDiagGpuTest(MatrixDiagTest):
  _use_gpu = True


class MatrixSetDiagTest(tf.test.TestCase):
class MatrixSetDiagTest(test.TestCase):
  _use_gpu = False

  def testSquare(self):
@ -84,7 +94,7 @@ class MatrixSetDiagTest(tf.test.TestCase):
      mat_set_diag = np.array([[1.0, 1.0, 0.0],
                               [1.0, 2.0, 1.0],
                               [1.0, 1.0, 3.0]])
      output = tf.matrix_set_diag(mat, v)
      output = array_ops.matrix_set_diag(mat, v)
      self.assertEqual((3, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag, output.eval())

@ -93,14 +103,14 @@ class MatrixSetDiagTest(tf.test.TestCase):
      v = np.array([3.0, 4.0])
      mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
      expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
      output = tf.matrix_set_diag(mat, v)
      output = array_ops.matrix_set_diag(mat, v)
      self.assertEqual((2, 3), output.get_shape())
      self.assertAllEqual(expected, output.eval())

      v = np.array([3.0, 4.0])
      mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
      expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
      output = tf.matrix_set_diag(mat, v)
      output = array_ops.matrix_set_diag(mat, v)
      self.assertEqual((3, 2), output.get_shape())
      self.assertAllEqual(expected, output.eval())

@ -123,7 +133,7 @@ class MatrixSetDiagTest(tf.test.TestCase):
                                 [[-4.0, 0.0, 4.0],
                                  [0.0, -5.0, 0.0],
                                  [2.0, 0.0, -6.0]]])
      output = tf.matrix_set_diag(mat_batch, v_batch)
      output = array_ops.matrix_set_diag(mat_batch, v_batch)
      self.assertEqual((2, 3, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag_batch, output.eval())

@ -142,52 +152,56 @@ class MatrixSetDiagTest(tf.test.TestCase):
                                  [0.0, -2.0, 0.0]],
                                 [[-4.0, 0.0, 4.0],
                                  [0.0, -5.0, 0.0]]])
      output = tf.matrix_set_diag(mat_batch, v_batch)
      output = array_ops.matrix_set_diag(mat_batch, v_batch)
      self.assertEqual((2, 2, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag_batch, output.eval())

  def testInvalidShape(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
      tf.matrix_set_diag(0, [0])
      array_ops.matrix_set_diag(0, [0])
    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
      tf.matrix_set_diag([[0]], 0)
      array_ops.matrix_set_diag([[0]], 0)

  def testInvalidShapeAtEval(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = tf.placeholder(dtype=tf.float32)
      v = array_ops.placeholder(dtype=dtypes_lib.float32)
      with self.assertRaisesOpError("input must be at least 2-dim"):
        tf.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
        array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
      with self.assertRaisesOpError(
          r"but received input shape: \[1,1\] and diagonal shape: \[\]"):
        tf.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})
        array_ops.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})

  def testGrad(self):
    shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8))
    with self.test_session(use_gpu=self._use_gpu):
      for shape in shapes:
        x = tf.constant(np.random.rand(*shape), dtype=tf.float32)
        x = constant_op.constant(
            np.random.rand(*shape), dtype=dtypes_lib.float32)
        diag_shape = shape[:-2] + (min(shape[-2:]),)
        x_diag = tf.constant(np.random.rand(*diag_shape), dtype=tf.float32)
        y = tf.matrix_set_diag(x, x_diag)
        error_x = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                                 y, y.get_shape().as_list())
        x_diag = constant_op.constant(
            np.random.rand(*diag_shape), dtype=dtypes_lib.float32)
        y = array_ops.matrix_set_diag(x, x_diag)
        error_x = gradient_checker.compute_gradient_error(
            x, x.get_shape().as_list(), y, y.get_shape().as_list())
        self.assertLess(error_x, 1e-4)
        error_x_diag = tf.test.compute_gradient_error(
            x_diag, x_diag.get_shape().as_list(),
            y, y.get_shape().as_list())
        error_x_diag = gradient_checker.compute_gradient_error(
            x_diag, x_diag.get_shape().as_list(), y, y.get_shape().as_list())
        self.assertLess(error_x_diag, 1e-4)

  def testGradWithNoShapeInformation(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
      v = tf.placeholder(dtype=tf.float32)
      mat = tf.placeholder(dtype=tf.float32)
      grad_input = tf.placeholder(dtype=tf.float32)
      output = tf.matrix_set_diag(mat, v)
      grads = tf.gradients(output, [mat, v], grad_ys=grad_input)
      v = array_ops.placeholder(dtype=dtypes_lib.float32)
      mat = array_ops.placeholder(dtype=dtypes_lib.float32)
      grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
      output = array_ops.matrix_set_diag(mat, v)
      grads = gradients_impl.gradients(output, [mat, v], grad_ys=grad_input)
      grad_input_val = np.random.rand(3, 3).astype(np.float32)
      grad_vals = sess.run(
          grads, feed_dict={v: 2 * np.ones(3), mat: np.ones((3, 3)),
                            grad_input: grad_input_val})
      grad_vals = sess.run(grads,
                           feed_dict={
                               v: 2 * np.ones(3),
                               mat: np.ones((3, 3)),
                               grad_input: grad_input_val
                           })
      self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])
      self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),
                          grad_vals[0])
@ -197,24 +211,24 @@ class MatrixSetDiagGpuTest(MatrixSetDiagTest):
  _use_gpu = True


class MatrixDiagPartTest(tf.test.TestCase):
class MatrixDiagPartTest(test.TestCase):
  _use_gpu = False

  def testSquare(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.diag(v)
      mat_diag = tf.matrix_diag_part(mat)
      mat_diag = array_ops.matrix_diag_part(mat)
      self.assertEqual((3,), mat_diag.get_shape())
      self.assertAllEqual(mat_diag.eval(), v)

  def testRectangular(self):
    with self.test_session(use_gpu=self._use_gpu):
      mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
      mat_diag = tf.matrix_diag_part(mat)
      mat_diag = array_ops.matrix_diag_part(mat)
      self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
      mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
      mat_diag = tf.matrix_diag_part(mat)
      mat_diag = array_ops.matrix_diag_part(mat)
      self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))

  def testSquareBatch(self):
@ -229,7 +243,7 @@ class MatrixDiagPartTest(tf.test.TestCase):
                             [0.0, 5.0, 0.0],
                             [0.0, 0.0, 6.0]]])
      self.assertEqual(mat_batch.shape, (2, 3, 3))
      mat_batch_diag = tf.matrix_diag_part(mat_batch)
      mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
      self.assertEqual((2, 3), mat_batch_diag.get_shape())
      self.assertAllEqual(mat_batch_diag.eval(), v_batch)

@ -243,28 +257,30 @@ class MatrixDiagPartTest(tf.test.TestCase):
                            [[4.0, 0.0, 0.0],
                             [0.0, 5.0, 0.0]]])
      self.assertEqual(mat_batch.shape, (2, 2, 3))
      mat_batch_diag = tf.matrix_diag_part(mat_batch)
      mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
      self.assertEqual((2, 2), mat_batch_diag.get_shape())
      self.assertAllEqual(mat_batch_diag.eval(), v_batch)

  def testInvalidShape(self):
    with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
      tf.matrix_diag_part(0)
      array_ops.matrix_diag_part(0)

  def testInvalidShapeAtEval(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = tf.placeholder(dtype=tf.float32)
      v = array_ops.placeholder(dtype=dtypes_lib.float32)
      with self.assertRaisesOpError("input must be at least 2-dim"):
        tf.matrix_diag_part(v).eval(feed_dict={v: 0.0})
        array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})

  def testGrad(self):
    shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
    with self.test_session(use_gpu=self._use_gpu):
      for shape in shapes:
        x = tf.constant(np.random.rand(*shape), dtype=np.float32)
        y = tf.matrix_diag_part(x)
        error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                               y, y.get_shape().as_list())
        x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
        y = array_ops.matrix_diag_part(x)
        error = gradient_checker.compute_gradient_error(x,
                                                        x.get_shape().as_list(),
                                                        y,
                                                        y.get_shape().as_list())
        self.assertLess(error, 1e-4)


@ -272,13 +288,13 @@ class MatrixDiagPartGpuTest(MatrixDiagPartTest):
  _use_gpu = True


class DiagTest(tf.test.TestCase):
class DiagTest(test.TestCase):

  def diagOp(self, diag, dtype, expected_ans, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
      tf_ans = tf.diag(tf.convert_to_tensor(diag.astype(dtype)))
      tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))
      out = tf_ans.eval()
      tf_ans_inv = tf.diag_part(expected_ans)
      tf_ans_inv = array_ops.diag_part(expected_ans)
      inv_out = tf_ans_inv.eval()
    self.assertAllClose(out, expected_ans)
    self.assertAllClose(inv_out, diag)
@ -392,19 +408,19 @@ class DiagTest(tf.test.TestCase):
                              [[0 + 0j, 0 + 0j], [7.7 + 7.7j, 0 + 0j]]],
                             [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
                              [[0 + 0j, 0 + 0j], [0 + 0j, 8.8 + 8.8j]]]]]],
                            dtype=dtype)
        dtype=dtype)
    self.diagOp(x, dtype, expected_ans)


class DiagPartOpTest(tf.test.TestCase):
class DiagPartOpTest(test.TestCase):

  def setUp(self):
    np.random.seed(0)

  def diagPartOp(self, tensor, dtype, expected_ans, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
      tensor = tf.convert_to_tensor(tensor.astype(dtype))
      tf_ans_inv = tf.diag_part(tensor)
      tensor = ops.convert_to_tensor(tensor.astype(dtype))
      tf_ans_inv = array_ops.diag_part(tensor)
      inv_out = tf_ans_inv.eval()
    self.assertAllClose(inv_out, expected_ans)
    self.assertShapeEqual(expected_ans, tf_ans_inv)
@ -422,9 +438,9 @@ class DiagPartOpTest(tf.test.TestCase):
      expected_ans = x[i, i]
      for shape in None, (None, 3), (3, None):
        with self.test_session(use_gpu=False):
          t = tf.convert_to_tensor(x.astype(np.float32))
          t = ops.convert_to_tensor(x.astype(np.float32))
          t.set_shape(shape)
          tf_ans = tf.diag_part(t)
          tf_ans = array_ops.diag_part(t)
          out = tf_ans.eval()
        self.assertAllClose(out, expected_ans)
        self.assertShapeEqual(expected_ans, tf_ans)
@ -459,40 +475,41 @@ class DiagPartOpTest(tf.test.TestCase):
    self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)


class DiagGradOpTest(tf.test.TestCase):
class DiagGradOpTest(test.TestCase):

  def testDiagGrad(self):
    np.random.seed(0)
    shapes = ((3,), (3,3), (3,3,3))
    dtypes = (tf.float32, tf.float64)
    shapes = ((3,), (3, 3), (3, 3, 3))
    dtypes = (dtypes_lib.float32, dtypes_lib.float64)
    with self.test_session(use_gpu=False):
      errors = []
      for shape in shapes:
        for dtype in dtypes:
          x1 = tf.constant(np.random.rand(*shape), dtype=dtype)
          y = tf.diag(x1)
          error = tf.test.compute_gradient_error(x1, x1.get_shape().as_list(),
                                                 y, y.get_shape().as_list())
          tf.logging.info("error = %f", error)
          x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
          y = array_ops.diag(x1)
          error = gradient_checker.compute_gradient_error(
              x1, x1.get_shape().as_list(), y, y.get_shape().as_list())
          tf_logging.info("error = %f", error)
          self.assertLess(error, 1e-4)


class DiagGradPartOpTest(tf.test.TestCase):
class DiagGradPartOpTest(test.TestCase):

  def testDiagPartGrad(self):
    np.random.seed(0)
    shapes = ((3,3), (3,3,3,3))
    dtypes = (tf.float32, tf.float64)
    shapes = ((3, 3), (3, 3, 3, 3))
    dtypes = (dtypes_lib.float32, dtypes_lib.float64)
    with self.test_session(use_gpu=False):
      errors = []
      for shape in shapes:
        for dtype in dtypes:
          x1 = tf.constant(np.random.rand(*shape), dtype=dtype)
          y = tf.diag_part(x1)
          error = tf.test.compute_gradient_error(x1, x1.get_shape().as_list(),
                                                 y, y.get_shape().as_list())
          tf.logging.info("error = %f", error)
          x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
          y = array_ops.diag_part(x1)
          error = gradient_checker.compute_gradient_error(
              x1, x1.get_shape().as_list(), y, y.get_shape().as_list())
          tf_logging.info("error = %f", error)
          self.assertLess(error, 1e-4)


if __name__ == "__main__":
  tf.test.main()
  test.main()

@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for division with division imported from __future__.

This file should be exactly the same as division_past_test.py except
@ -24,25 +23,29 @@ from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test


class DivisionTestCase(tf.test.TestCase):
class DivisionTestCase(test.TestCase):

  def testDivision(self):
    """Test all the different ways to divide."""
    values = [1, 2, 7, 11]
    functions = (lambda x: x), tf.constant
    functions = (lambda x: x), constant_op.constant
    # TODO(irving): Test int8, int16 once we support casts for those.
    dtypes = np.int32, np.int64, np.float32, np.float64

    def check(x, y):
      if isinstance(x, tf.Tensor):
      if isinstance(x, ops.Tensor):
        x = x.eval()
      if isinstance(y, tf.Tensor):
      if isinstance(y, ops.Tensor):
        y = y.eval()
      self.assertEqual(x.dtype, y.dtype)
      self.assertEqual(x, y)

    with self.test_session():
      for dtype in dtypes:
        for x in map(dtype, values):
@ -60,4 +63,4 @@ class DivisionTestCase(tf.test.TestCase):


if __name__ == "__main__":
  tf.test.main()
  test.main()

@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for division with division imported from __future__.

This file should be exactly the same as division_past_test.py except
@ -24,25 +23,29 @@ from __future__ import absolute_import
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test


class DivisionTestCase(tf.test.TestCase):
class DivisionTestCase(test.TestCase):

  def testDivision(self):
    """Test all the different ways to divide."""
    values = [1, 2, 7, 11]
    functions = (lambda x: x), tf.constant
    functions = (lambda x: x), constant_op.constant
    # TODO(irving): Test int8, int16 once we support casts for those.
    dtypes = np.int32, np.int64, np.float32, np.float64

    def check(x, y):
      if isinstance(x, tf.Tensor):
      if isinstance(x, ops.Tensor):
        x = x.eval()
      if isinstance(y, tf.Tensor):
      if isinstance(y, ops.Tensor):
        y = y.eval()
      self.assertEqual(x.dtype, y.dtype)
      self.assertEqual(x, y)

    with self.test_session():
      for dtype in dtypes:
        for x in map(dtype, values):
@ -60,4 +63,4 @@ class DivisionTestCase(tf.test.TestCase):


if __name__ == "__main__":
  tf.test.main()
  test.main()

@ -13,15 +13,23 @@
# limitations under the License.
# ==============================================================================
"""Tests for draw_bounding_box_op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import image_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class DrawBoundingBoxOpTest(tf.test.TestCase):
class DrawBoundingBoxOpTest(test.TestCase):

  def _fillBorder(self, image, color):
    """Fill the border of the image.
@ -54,9 +62,8 @@ class DrawBoundingBoxOpTest(tf.test.TestCase):
    """
    # THIS TABLE MUST MATCH draw_bounding_box_op.cc
    color_table = np.asarray([[1, 1, 0, 1], [0, 0, 1, 1], [1, 0, 0, 1],
                              [0, 1, 0, 1], [0.5, 0, 0.5, 1],
                              [0.5, 0.5, 0, 1], [0.5, 0, 0, 1],
                              [0, 0, 0.5, 1], [0, 1, 1, 1],
                              [0, 1, 0, 1], [0.5, 0, 0.5, 1], [0.5, 0.5, 0, 1],
                              [0.5, 0, 0, 1], [0, 0, 0.5, 1], [0, 1, 1, 1],
                              [1, 0, 1, 1]])
    assert len(img.shape) == 3
    depth = img.shape[2]
@ -73,12 +80,12 @@ class DrawBoundingBoxOpTest(tf.test.TestCase):
    test_drawn_image = self._fillBorder(image, color)
    bboxes = np.asarray([0, 0, 1, 1])
    bboxes = np.vstack([bboxes for _ in range(num_boxes)])
    bboxes = tf.to_float(bboxes)
    bboxes = tf.expand_dims(bboxes, 0)
    image = tf.convert_to_tensor(image)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.expand_dims(image, 0)
    image = tf.image.draw_bounding_boxes(image, bboxes)
    bboxes = math_ops.to_float(bboxes)
    bboxes = array_ops.expand_dims(bboxes, 0)
    image = ops.convert_to_tensor(image)
    image = image_ops_impl.convert_image_dtype(image, dtypes.float32)
    image = array_ops.expand_dims(image, 0)
    image = image_ops.draw_bounding_boxes(image, bboxes)
    with self.test_session(use_gpu=False) as sess:
      op_drawn_image = np.squeeze(sess.run(image), 0)
      self.assertAllEqual(test_drawn_image, op_drawn_image)
@ -98,5 +105,6 @@ class DrawBoundingBoxOpTest(tf.test.TestCase):
    image = np.zeros([4, 4, 1], "float32")
    self._testDrawBoundingBoxColorCycling(image)


if __name__ == "__main__":
  tf.test.main()
  test.main()

@ -12,24 +12,32 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# ==============================================================================
|
||||
|
||||
"""Tests for the DynamicPartition op."""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
from six.moves import xrange # pylint: disable=redefined-builtin
|
||||
import tensorflow as tf
|
||||
|
||||
from tensorflow.python.framework import constant_op
|
||||
from tensorflow.python.framework import dtypes
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import data_flow_ops
|
||||
from tensorflow.python.ops import gradients_impl
|
||||
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
|
||||
from tensorflow.python.platform import test
|
||||
|
||||
|
||||
class DynamicPartitionTest(tf.test.TestCase):
|
||||
class DynamicPartitionTest(test.TestCase):
|
||||
|
||||
def testSimpleOneDimensional(self):
|
||||
with self.test_session() as sess:
|
||||
data = tf.constant([0, 13, 2, 39, 4, 17])
|
||||
indices = tf.constant([0, 0, 2, 3, 2, 1])
|
||||
partitions = tf.dynamic_partition(data, indices, num_partitions=4)
|
||||
data = constant_op.constant([0, 13, 2, 39, 4, 17])
|
||||
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
data, indices, num_partitions=4)
|
||||
partition_vals = sess.run(partitions)
|
||||
|
||||
self.assertAllEqual([0, 13], partition_vals[0])
|
||||
@ -45,10 +53,11 @@ class DynamicPartitionTest(tf.test.TestCase):
|
||||
|
||||
def testSimpleTwoDimensional(self):
|
||||
with self.test_session() as sess:
|
||||
data = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
|
||||
[9, 10, 11], [12, 13, 14], [15, 16, 17]])
|
||||
indices = tf.constant([0, 0, 2, 3, 2, 1])
|
||||
partitions = tf.dynamic_partition(data, indices, num_partitions=4)
|
||||
data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
|
||||
[12, 13, 14], [15, 16, 17]])
|
||||
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
|
||||
partitions = data_flow_ops.dynamic_partition(
|
||||
data, indices, num_partitions=4)
|
||||
partition_vals = sess.run(partitions)
|
||||
|
||||
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
|
||||
@ -70,9 +79,9 @@ class DynamicPartitionTest(tf.test.TestCase):
|
||||
partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
|
||||
for extra_shape in (), (6,), (6, 7):
|
||||
data = np.random.randn(*(shape + extra_shape))
|
||||
partitions_t = tf.constant(partitions, dtype=tf.int32)
|
||||
data_t = tf.constant(data)
|
||||
outputs = tf.dynamic_partition(
|
||||
partitions_t = constant_op.constant(partitions, dtype=dtypes.int32)
|
||||
data_t = constant_op.constant(data)
|
||||
outputs = data_flow_ops.dynamic_partition(
|
||||
data_t, partitions_t, num_partitions=n)
|
||||
self.assertEqual(n, len(outputs))
|
||||
outputs_val = sess.run(outputs)
|
||||
@ -81,16 +90,18 @@ class DynamicPartitionTest(tf.test.TestCase):
|
||||
|
||||
# Test gradients
|
||||
outputs_grad = [7 * output for output in outputs_val]
|
||||
grads = tf.gradients(outputs, [data_t, partitions_t], outputs_grad)
|
||||
grads = gradients_impl.gradients(outputs, [data_t, partitions_t],
|
||||
outputs_grad)
|
||||
self.assertEqual(grads[1], None) # Partitions has no gradients
|
||||
self.assertAllEqual(7 * data, sess.run(grads[0]))
|
||||
|
||||
   def testErrorIndexOutOfRange(self):
     with self.test_session() as sess:
-      data = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
-                          [9, 10, 11], [12, 13, 14]])
-      indices = tf.constant([0, 2, 99, 2, 2])
-      partitions = tf.dynamic_partition(data, indices, num_partitions=4)
+      data = constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11],
+                                   [12, 13, 14]])
+      indices = constant_op.constant([0, 2, 99, 2, 2])
+      partitions = data_flow_ops.dynamic_partition(
+          data, indices, num_partitions=4)
       with self.assertRaisesOpError(r"partitions\[2\] = 99 is not in \[0, 4\)"):
         sess.run(partitions)
 
@@ -98,16 +109,17 @@ class DynamicPartitionTest(tf.test.TestCase):
     with self.test_session() as sess:
       bad = 17
       data = np.zeros(5)
-      partitions = tf.dynamic_partition(data, bad, num_partitions=7)
+      partitions = data_flow_ops.dynamic_partition(data, bad, num_partitions=7)
       with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
         sess.run(partitions)
 
   def testHigherRankIndexOutOfRange(self):
     with self.test_session() as sess:
       shape = (2, 3)
-      indices = tf.placeholder(shape=shape, dtype=np.int32)
+      indices = array_ops.placeholder(shape=shape, dtype=np.int32)
       data = np.zeros(shape + (5,))
-      partitions = tf.dynamic_partition(data, indices, num_partitions=7)
+      partitions = data_flow_ops.dynamic_partition(
+          data, indices, num_partitions=7)
       for i in xrange(2):
         for j in xrange(3):
           bad = np.zeros(shape, dtype=np.int32)
@@ -117,11 +129,11 @@ class DynamicPartitionTest(tf.test.TestCase):
           sess.run(partitions, feed_dict={indices: bad})
 
   def testErrorWrongDimsIndices(self):
-    data = tf.constant([[0], [1], [2]])
-    indices = tf.constant([[0], [0]])
+    data = constant_op.constant([[0], [1], [2]])
+    indices = constant_op.constant([[0], [0]])
     with self.assertRaises(ValueError):
-      tf.dynamic_partition(data, indices, num_partitions=4)
+      data_flow_ops.dynamic_partition(data, indices, num_partitions=4)
 
 
 if __name__ == "__main__":
-  tf.test.main()
+  test.main()
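For orientation before the next file, a minimal sketch of what dynamic_partition computes, written against the same module-level imports the tests now use (illustration only, not part of the commit):

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import data_flow_ops

data = constant_op.constant([0, 13, 2, 39, 4, 17])
indices = constant_op.constant([0, 0, 2, 3, 2, 1])
# data[i] is routed to output number indices[i]:
#   partitions[0] == [0, 13], partitions[1] == [17],
#   partitions[2] == [2, 4],  partitions[3] == [39]
partitions = data_flow_ops.dynamic_partition(data, indices, num_partitions=4)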
@@ -12,24 +12,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
 
 """Tests for tensorflow.ops.data_flow_ops.dynamic_stitch."""
 
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
 import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.ops import data_flow_ops
+from tensorflow.python.ops import gradients_impl
+import tensorflow.python.ops.data_flow_grad  # pylint: disable=unused-import
+from tensorflow.python.platform import test
 
 
-class DynamicStitchTest(tf.test.TestCase):
+class DynamicStitchTest(test.TestCase):
 
   def testScalar(self):
     with self.test_session():
-      indices = [tf.constant(0), tf.constant(1)]
-      data = [tf.constant(40), tf.constant(60)]
+      indices = [constant_op.constant(0), constant_op.constant(1)]
+      data = [constant_op.constant(40), constant_op.constant(60)]
       for step in -1, 1:
-        stitched_t = tf.dynamic_stitch(indices[::step], data)
+        stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
         stitched_val = stitched_t.eval()
         self.assertAllEqual([40, 60][::step], stitched_val)
         # Dimension 0 is determined by the max index in indices, so we
@@ -39,11 +44,14 @@ class DynamicStitchTest(tf.test.TestCase):
 
   def testSimpleOneDimensional(self):
     with self.test_session():
-      indices = [tf.constant([0, 4, 7]),
-                 tf.constant([1, 6, 2, 3, 5])]
-      data = [tf.constant([0, 40, 70]),
-              tf.constant([10, 60, 20, 30, 50])]
-      stitched_t = tf.dynamic_stitch(indices, data)
+      indices = [
+          constant_op.constant([0, 4, 7]), constant_op.constant([1, 6, 2, 3, 5])
+      ]
+      data = [
+          constant_op.constant([0, 40, 70]),
+          constant_op.constant([10, 60, 20, 30, 50])
+      ]
+      stitched_t = data_flow_ops.dynamic_stitch(indices, data)
       stitched_val = stitched_t.eval()
       self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
       # Dimension 0 is determined by the max index in indices, so we
@@ -53,9 +61,9 @@ class DynamicStitchTest(tf.test.TestCase):
 
   def testOneListOneDimensional(self):
     with self.test_session():
-      indices = [tf.constant([1, 6, 2, 3, 5, 0, 4, 7])]
-      data = [tf.constant([10, 60, 20, 30, 50, 0, 40, 70])]
-      stitched_t = tf.dynamic_stitch(indices, data)
+      indices = [constant_op.constant([1, 6, 2, 3, 5, 0, 4, 7])]
+      data = [constant_op.constant([10, 60, 20, 30, 50, 0, 40, 70])]
+      stitched_t = data_flow_ops.dynamic_stitch(indices, data)
       stitched_val = stitched_t.eval()
       self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
       # Dimension 0 is determined by the max index in indices, so we
@@ -65,17 +73,19 @@ class DynamicStitchTest(tf.test.TestCase):
 
   def testSimpleTwoDimensional(self):
     with self.test_session():
-      indices = [tf.constant([0, 4, 7]),
-                 tf.constant([1, 6]),
-                 tf.constant([2, 3, 5])]
-      data = [tf.constant([[0, 1], [40, 41], [70, 71]]),
-              tf.constant([[10, 11], [60, 61]]),
-              tf.constant([[20, 21], [30, 31], [50, 51]])]
-      stitched_t = tf.dynamic_stitch(indices, data)
+      indices = [
+          constant_op.constant([0, 4, 7]), constant_op.constant([1, 6]),
+          constant_op.constant([2, 3, 5])
+      ]
+      data = [
+          constant_op.constant([[0, 1], [40, 41], [70, 71]]),
+          constant_op.constant([[10, 11], [60, 61]]),
+          constant_op.constant([[20, 21], [30, 31], [50, 51]])
+      ]
+      stitched_t = data_flow_ops.dynamic_stitch(indices, data)
       stitched_val = stitched_t.eval()
-      self.assertAllEqual(
-          [[0, 1], [10, 11], [20, 21], [30, 31],
-           [40, 41], [50, 51], [60, 61], [70, 71]], stitched_val)
+      self.assertAllEqual([[0, 1], [10, 11], [20, 21], [30, 31], [40, 41],
+                           [50, 51], [60, 61], [70, 71]], stitched_val)
       # Dimension 0 is determined by the max index in indices, so we
       # can only infer that the output is a matrix with 2 columns and
       # some unknown number of rows.
@@ -83,54 +93,72 @@ class DynamicStitchTest(tf.test.TestCase):
 
   def testHigherRank(self):
     with self.test_session() as sess:
-      indices = [tf.constant(6), tf.constant([4, 1]),
-                 tf.constant([[5, 2], [0, 3]])]
-      data = [tf.constant([61, 62]), tf.constant([[41, 42], [11, 12]]),
-              tf.constant([[[51, 52], [21, 22]], [[1, 2], [31, 32]]])]
-      stitched_t = tf.dynamic_stitch(indices, data)
+      indices = [
+          constant_op.constant(6), constant_op.constant([4, 1]),
+          constant_op.constant([[5, 2], [0, 3]])
+      ]
+      data = [
+          constant_op.constant([61, 62]),
+          constant_op.constant([[41, 42], [11, 12]]),
+          constant_op.constant([[[51, 52], [21, 22]], [[1, 2], [31, 32]]])
+      ]
+      stitched_t = data_flow_ops.dynamic_stitch(indices, data)
       stitched_val = stitched_t.eval()
       correct = 10 * np.arange(7)[:, None] + [1, 2]
       self.assertAllEqual(correct, stitched_val)
       self.assertEqual([None, 2], stitched_t.get_shape().as_list())
       # Test gradients
       stitched_grad = 7 * stitched_val
-      grads = tf.gradients(stitched_t, indices + data, stitched_grad)
+      grads = gradients_impl.gradients(stitched_t, indices + data,
+                                       stitched_grad)
       self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
       for datum, grad in zip(data, sess.run(grads[3:])):
         self.assertAllEqual(7 * datum.eval(), grad)
 
   def testErrorIndicesMultiDimensional(self):
-    indices = [tf.constant([0, 4, 7]),
-               tf.constant([[1, 6, 2, 3, 5]])]
-    data = [tf.constant([[0, 40, 70]]),
-            tf.constant([10, 60, 20, 30, 50])]
+    indices = [
+        constant_op.constant([0, 4, 7]), constant_op.constant([[1, 6, 2, 3, 5]])
+    ]
+    data = [
+        constant_op.constant([[0, 40, 70]]),
+        constant_op.constant([10, 60, 20, 30, 50])
+    ]
     with self.assertRaises(ValueError):
-      tf.dynamic_stitch(indices, data)
+      data_flow_ops.dynamic_stitch(indices, data)
 
   def testErrorDataNumDimsMismatch(self):
-    indices = [tf.constant([0, 4, 7]),
-               tf.constant([1, 6, 2, 3, 5])]
-    data = [tf.constant([0, 40, 70]),
-            tf.constant([[10, 60, 20, 30, 50]])]
+    indices = [
+        constant_op.constant([0, 4, 7]), constant_op.constant([1, 6, 2, 3, 5])
+    ]
+    data = [
+        constant_op.constant([0, 40, 70]),
+        constant_op.constant([[10, 60, 20, 30, 50]])
+    ]
     with self.assertRaises(ValueError):
-      tf.dynamic_stitch(indices, data)
+      data_flow_ops.dynamic_stitch(indices, data)
 
   def testErrorDataDimSizeMismatch(self):
-    indices = [tf.constant([0, 4, 5]),
-               tf.constant([1, 6, 2, 3])]
-    data = [tf.constant([[0], [40], [70]]),
-            tf.constant([[10, 11], [60, 61], [20, 21], [30, 31]])]
+    indices = [
+        constant_op.constant([0, 4, 5]), constant_op.constant([1, 6, 2, 3])
+    ]
+    data = [
+        constant_op.constant([[0], [40], [70]]),
+        constant_op.constant([[10, 11], [60, 61], [20, 21], [30, 31]])
+    ]
     with self.assertRaises(ValueError):
-      tf.dynamic_stitch(indices, data)
+      data_flow_ops.dynamic_stitch(indices, data)
 
   def testErrorDataAndIndicesSizeMismatch(self):
-    indices = [tf.constant([0, 4, 7]),
-               tf.constant([1, 6, 2, 3, 5])]
-    data = [tf.constant([0, 40, 70]),
-            tf.constant([10, 60, 20, 30])]
+    indices = [
+        constant_op.constant([0, 4, 7]), constant_op.constant([1, 6, 2, 3, 5])
+    ]
+    data = [
+        constant_op.constant([0, 40, 70]),
+        constant_op.constant([10, 60, 20, 30])
+    ]
     with self.assertRaises(ValueError):
-      tf.dynamic_stitch(indices, data)
+      data_flow_ops.dynamic_stitch(indices, data)
 
 
 if __name__ == "__main__":
-  tf.test.main()
+  test.main()
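Likewise a small sketch (my illustration under the same assumptions, not commit code): dynamic_stitch is the inverse of dynamic_partition, merging values back into a single tensor by index:

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import data_flow_ops

indices = [constant_op.constant([0, 4, 7]),
           constant_op.constant([1, 6, 2, 3, 5])]
data = [constant_op.constant([0, 40, 70]),
        constant_op.constant([10, 60, 20, 30, 50])]
# Element data[m][i] lands at position indices[m][i] of the result:
# [0, 10, 20, 30, 40, 50, 60, 70]
stitched = data_flow_ops.dynamic_stitch(indices, data)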
@@ -12,29 +12,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
 
 """Tests for tensorflow.kernels.edit_distance_op."""
 
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
 import numpy as np
-import tensorflow as tf
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import sparse_tensor
+from tensorflow.python.ops import array_ops
+from tensorflow.python.platform import test
 
 
 def ConstantOf(x):
   x = np.asarray(x)
   # Convert to int64 if it's not a string or unicode
-  if x.dtype.char not in "SU": x = np.asarray(x, dtype=np.int64)
-  return tf.constant(x)
+  if x.dtype.char not in "SU":
+    x = np.asarray(x, dtype=np.int64)
+  return constant_op.constant(x)
 
 
-class EditDistanceTest(tf.test.TestCase):
+class EditDistanceTest(test.TestCase):
 
-  def _testEditDistanceST(
-      self, hypothesis_st, truth_st, normalize, expected_output,
-      expected_shape, expected_err_re=None):
-    edit_distance = tf.edit_distance(
+  def _testEditDistanceST(self,
+                          hypothesis_st,
+                          truth_st,
+                          normalize,
+                          expected_output,
+                          expected_shape,
+                          expected_err_re=None):
+    edit_distance = array_ops.edit_distance(
         hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
 
     if expected_err_re is None:
@@ -45,46 +55,49 @@ class EditDistanceTest(tf.test.TestCase):
       with self.assertRaisesOpError(expected_err_re):
         edit_distance.eval()
 
-  def _testEditDistance(self, hypothesis, truth, normalize,
-                        expected_output, expected_err_re=None):
+  def _testEditDistance(self,
+                        hypothesis,
+                        truth,
+                        normalize,
+                        expected_output,
+                        expected_err_re=None):
     # Shape inference figures out the shape from the shape variables
     # Explicit tuple() needed since zip returns an iterator in Python 3.
     expected_shape = [
-        max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]]
+        max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]
+    ]
 
     # SparseTensorValue inputs.
-    with tf.Graph().as_default() as g, self.test_session(g):
+    with ops.Graph().as_default() as g, self.test_session(g):
       # hypothesis and truth are (index, value, shape) tuples
       self._testEditDistanceST(
-          hypothesis_st=tf.SparseTensorValue(
+          hypothesis_st=sparse_tensor.SparseTensorValue(
               *[ConstantOf(x) for x in hypothesis]),
-          truth_st=tf.SparseTensorValue(*[ConstantOf(x) for x in truth]),
+          truth_st=sparse_tensor.SparseTensorValue(
+              *[ConstantOf(x) for x in truth]),
           normalize=normalize,
          expected_output=expected_output,
          expected_shape=expected_shape,
          expected_err_re=expected_err_re)
 
     # SparseTensor inputs.
-    with tf.Graph().as_default() as g, self.test_session(g):
+    with ops.Graph().as_default() as g, self.test_session(g):
       # hypothesis and truth are (index, value, shape) tuples
       self._testEditDistanceST(
-          hypothesis_st=tf.SparseTensor(*[ConstantOf(x) for x in hypothesis]),
-          truth_st=tf.SparseTensor(*[ConstantOf(x) for x in truth]),
+          hypothesis_st=sparse_tensor.SparseTensor(
+              *[ConstantOf(x) for x in hypothesis]),
+          truth_st=sparse_tensor.SparseTensor(*[ConstantOf(x) for x in truth]),
          normalize=normalize,
          expected_output=expected_output,
         expected_shape=expected_shape,
         expected_err_re=expected_err_re)
 
   def testEditDistanceNormalized(self):
-    hypothesis_indices = [[0, 0], [0, 1],
-                          [1, 0], [1, 1]]
-    hypothesis_values = [0, 1,
-                         1, -1]
+    hypothesis_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
+    hypothesis_values = [0, 1, 1, -1]
     hypothesis_shape = [2, 2]
-    truth_indices = [[0, 0],
-                     [1, 0], [1, 1]]
-    truth_values = [0,
-                    1, 1]
+    truth_indices = [[0, 0], [1, 0], [1, 1]]
+    truth_values = [0, 1, 1]
     truth_shape = [2, 2]
     expected_output = [1.0, 0.5]
 
@@ -95,15 +108,11 @@ class EditDistanceTest(tf.test.TestCase):
         expected_output=expected_output)
 
   def testEditDistanceUnnormalized(self):
-    hypothesis_indices = [[0, 0],
-                          [1, 0], [1, 1]]
-    hypothesis_values = [10,
-                         10, 11]
+    hypothesis_indices = [[0, 0], [1, 0], [1, 1]]
+    hypothesis_values = [10, 10, 11]
     hypothesis_shape = [2, 2]
-    truth_indices = [[0, 0], [0, 1],
-                     [1, 0], [1, 1]]
-    truth_values = [1, 2,
-                    1, -1]
+    truth_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
+    truth_values = [1, 2, 1, -1]
     truth_shape = [2, 3]
     expected_output = [2.0, 2.0]
 
@@ -125,8 +134,7 @@ class EditDistanceTest(tf.test.TestCase):
     truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
     truth_shape = [2, 11]
     expected_unnormalized = [6.0, 6.0]
-    expected_normalized = [6.0/len("altruistic"),
-                           6.0/len("algorithm")]
+    expected_normalized = [6.0 / len("altruistic"), 6.0 / len("algorithm")]
 
     self._testEditDistance(
         hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
@@ -141,17 +149,16 @@ class EditDistanceTest(tf.test.TestCase):
         expected_output=expected_normalized)
 
   def testEditDistance3D(self):
-    hypothesis_indices = [[0, 0, 0],
-                          [1, 0, 0]]
+    hypothesis_indices = [[0, 0, 0], [1, 0, 0]]
     hypothesis_values = [0, 1]
     hypothesis_shape = [2, 1, 1]
-    truth_indices = [[0, 1, 0],
-                     [1, 0, 0],
-                     [1, 1, 0]]
+    truth_indices = [[0, 1, 0], [1, 0, 0], [1, 1, 0]]
    truth_values = [0, 1, 1]
    truth_shape = [2, 2, 1]
-    expected_output = [[np.inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
-                       [0.0, 1.0]]  # (1,0): match, (1,1): no hypothesis
+    expected_output = [
+        [np.inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
+        [0.0, 1.0]
+    ]  # (1,0): match, (1,1): no hypothesis
 
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
@@ -206,4 +213,4 @@ class EditDistanceTest(tf.test.TestCase):
 
 
 if __name__ == "__main__":
-  tf.test.main()
+  test.main()
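To make the expected values above concrete, a hedged sketch of the op under test (mirroring testEditDistanceNormalized, with the int64 conversion that ConstantOf performs done by hand; illustration only):

import numpy as np

from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops

hypothesis = sparse_tensor.SparseTensor(
    [[0, 0], [0, 1], [1, 0], [1, 1]],
    np.asarray([0, 1, 1, -1], dtype=np.int64), [2, 2])
truth = sparse_tensor.SparseTensor(
    [[0, 0], [1, 0], [1, 1]],
    np.asarray([0, 1, 1], dtype=np.int64), [2, 2])
# Per-row Levenshtein distance divided by the truth length:
#   row 0: hypothesis [0, 1] vs truth [0]     -> 1 edit / 1 = 1.0
#   row 1: hypothesis [1, -1] vs truth [1, 1] -> 1 edit / 2 = 0.5
dist = array_ops.edit_distance(hypothesis, truth, normalize=True)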
Some files were not shown because too many files have changed in this diff.