Merge changes from github.
END_PUBLIC

--- Commit daa67ad17 authored by Jonathan Hseu <vomjom@vomjom.net>, committed by Frank Chen <frankchn@gmail.com>:
    Remove unittest import (#11596)

--- Commit 491beb74c authored by A. Unique TensorFlower <gardener@tensorflow.org>, committed by TensorFlower Gardener <gardener@tensorflow.org>:
    BEGIN_PUBLIC
    Automated g4 rollback of changelist 162423171

PiperOrigin-RevId: 162541442

Parent: 1fefe92f47
Commit: 9cc871e81c
@@ -1,8 +1,9 @@
 # Where component owners are known, add them here.
 
+tensorflow/core/platform/windows/* @mrry
+tensorflow/java/* @asimshankar
 tensorflow/tensorboard/* @jart @dandelionmane
 tensorflow/tools/docs/* @markdaoust
-tensorflow/java/* @asimshankar
 
 # contrib
 
@@ -46,5 +47,6 @@ tensorflow/contrib/stateless/* @girving
 tensorflow/contrib/tensor_forest/* @gilberthendry @thomascolthurst
 tensorflow/contrib/testing/* @dandelionmane
 tensorflow/contrib/timeseries/* @allenlavoie
+tensorflow/contrib/tpu/* @frankchn @saeta @jhseu
 tensorflow/contrib/training/* @joel-shor @ebrevdo
 tensorflow/contrib/util/* @sherrym
configure (vendored, 20 lines changed)
@@ -26,7 +26,7 @@ function is_windows() {
 }
 
 function is_ppc64le() {
-  [[ "${uname -m}" == "ppc64le" ]]
+  [[ "$(uname -m)" == "ppc64le" ]]
 }
 
 function sed_in_place() {
@@ -298,7 +298,7 @@ fi # TF_NEED_MKL
 
 ## Set up architecture-dependent optimization flags.
 if [ -z "$CC_OPT_FLAGS" ]; then
-  if [ is_ppc64le ]; then
+  if is_ppc64le; then
     # gcc on ppc64le does not support -march, use mcpu instead
     default_cc_opt_flags="-mcpu=native"
   else
@@ -492,6 +492,8 @@ while true; do
   if [ -z "$TF_CUDA_VERSION" ]; then
     read -p "Please specify the CUDA SDK version you want to use, e.g. 7.0. [Leave empty to default to CUDA 8.0]: " TF_CUDA_VERSION
   fi
+  # Set default CUDA version if not set
+  TF_CUDA_VERSION=${TF_CUDA_VERSION:-8.0}
 
   fromuser=""
   if [ -z "$CUDA_TOOLKIT_PATH" ]; then
@@ -545,11 +547,7 @@ while true; do
   CUDA_TOOLKIT_PATH=""
 done
 
-# Set default CUDA version if not set
-if [ -z "$TF_CUDA_VERSION" ]; then
-  TF_CUDA_VERSION="8.0"
-  export TF_CUDA_VERSION
-fi
+export TF_CUDA_VERSION
 write_action_env_to_bazelrc "TF_CUDA_VERSION" "$TF_CUDA_VERSION"
 
 # Set up which gcc nvcc should use as the host compiler
@@ -587,6 +585,8 @@ while true; do
   if [ -z "$TF_CUDNN_VERSION" ]; then
     read -p "Please specify the cuDNN version you want to use. [Leave empty to default to cuDNN 6.0]: " TF_CUDNN_VERSION
   fi
+  # Set default CUDNN version if not set
+  TF_CUDNN_VERSION=${TF_CUDNN_VERSION:-6}
 
   fromuser=""
   if [ -z "$CUDNN_INSTALL_PATH" ]; then
@@ -659,11 +659,7 @@ while true; do
   CUDNN_INSTALL_PATH=""
 done
 
-# Set default CUDNN version if not set
-if [ -z "$TF_CUDNN_VERSION" ]; then
-  TF_CUDNN_VERSION="6"
-  export TF_CUDNN_VERSION
-fi
+export TF_CUDNN_VERSION
 write_action_env_to_bazelrc "TF_CUDNN_VERSION" "$TF_CUDNN_VERSION"
 
 # Configure the compute capabilities that TensorFlow builds for.
@@ -65,7 +65,7 @@ class SymbolicGradientBuilder {
   // gradients for the node associated with `src`.
   Status BackpropAlongEdge(const Output& dst_grad, const Output& src);
 
-  // Adds a node to the graph (returned in`grad`) that sums the in-bound
+  // Adds a node to the graph (returned in `grad`) that sums the in-bound
   // gradients to `src` (if there are more than one).
   Status SumGradients(const Output& src, Output* grad);
 
@@ -45,9 +45,10 @@ extern const char* const DEVICE_XLA_CPU;
 extern const char* const DEVICE_XLA_GPU;
 
 constexpr std::array<DataType, 2> kIntTypes = {{DT_INT32, DT_INT64}};
-constexpr std::array<DataType, 2> kFloatTypes = {{DT_FLOAT, DT_DOUBLE}};
-constexpr std::array<DataType, 4> kNumericTypes = {
-    {DT_INT32, DT_INT64, DT_FLOAT, DT_DOUBLE}};
+constexpr std::array<DataType, 3> kFloatTypes = {
+    {DT_HALF, DT_FLOAT, DT_DOUBLE}};
+constexpr std::array<DataType, 5> kNumericTypes = {
+    {DT_INT32, DT_INT64, DT_HALF, DT_FLOAT, DT_DOUBLE}};
 
 constexpr std::array<DataType, 5> kCpuAllTypes = {
     {DT_INT32, DT_INT64, DT_FLOAT, DT_DOUBLE, DT_BOOL}};
@@ -15,6 +15,7 @@ limitations under the License.
 
 #include "tensorflow/compiler/xla/util.h"
 
+#include <numeric>
 #include <stdarg.h>
 #include <numeric>
 
@@ -75,8 +75,8 @@ class UnionClusterResolver(ClusterResolver):
 
   This class performs a union given two or more existing ClusterResolvers. It
   merges the underlying ClusterResolvers, and returns one unified ClusterSpec
-  when as_cluster_spec is called. The details of the merge function is
-  documented in the as_cluster_spec function.
+  when cluster_spec is called. The details of the merge function is
+  documented in the cluster_spec function.
   """
 
   def __init__(self, *args):
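The union semantics described in that docstring can be pictured with a small, purely illustrative sketch. Plain dictionaries stand in for ClusterSpec here, and the helper name `union_cluster_spec` and the job/task layout are assumptions for illustration, not the actual tf.contrib API:

```python
# Illustrative only: a cluster spec is modeled as {job_name: [task_addresses]}.
def union_cluster_spec(*specs):
    """Merge several cluster specs into one unified spec (hypothetical helper)."""
    merged = {}
    for spec in specs:
        for job, tasks in spec.items():
            merged.setdefault(job, []).extend(tasks)
    return merged

gpu_cluster = {"worker": ["gpu-worker0:2222", "gpu-worker1:2222"]}
tpu_cluster = {"worker": ["tpu-worker0:2222"], "ps": ["ps0:2222"]}
print(union_cluster_spec(gpu_cluster, tpu_cluster))
# {'worker': ['gpu-worker0:2222', 'gpu-worker1:2222', 'tpu-worker0:2222'], 'ps': ['ps0:2222']}
```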
@@ -225,7 +225,7 @@ Step-by-step Windows build
 
 *   `-Dtensorflow_ENABLE_GPU=(ON|OFF)`. Defaults to `OFF`. Include
     GPU support. If GPU is enabled you need to install the CUDA 8.0 Toolkit and CUDNN 5.1.
-    CMake will expect the location of CUDNN in -DCUDNN_HOME=path_you_unziped_cudnn.
+    CMake will expect the location of CUDNN in -DCUDNN_HOME=path_you_unzipped_cudnn.
 
 *   `-Dtensorflow_BUILD_CC_TESTS=(ON|OFF)`. Defaults to `OFF`. This builds cc unit tests.
     There are many of them and building will take a few hours.
@@ -416,7 +416,7 @@ def get_unique_variable(var_op_name):
  for candidate in candidates:
    if candidate.op.name == var_op_name:
      return candidate
- raise ValueError('Variable %s does not uniquely identify a variable',
+ raise ValueError('Variable %s does not uniquely identify a variable' %
                   var_op_name)
 
 
@@ -444,7 +444,7 @@ def assign_from_values(var_names_to_values):
       var_value = var_names_to_values[var_name]
       var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
       if not var:
-        raise ValueError('Variable %s wasnt found', var_name)
+        raise ValueError('Variable %s wasn\'t found' % var_name)
       elif len(var) > 1:
         # tf.get_collection is just a filter on the prefix: find the exact match:
         found = False
@@ -455,7 +455,7 @@ def assign_from_values(var_names_to_values):
             break
 
         if not found:
-          raise ValueError('Variable %s doesnt uniquely identify a variable',
+          raise ValueError('Variable %s doesn\'t uniquely identify a variable' %
                            var_name)
       else:
         var = var[0]
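The ValueError changes in the three hunks above all fix the same pattern: passing the format string and its argument as separate exception arguments (logging-style) leaves the %s placeholder uninterpolated, whereas %-formatting builds the intended message. A minimal sketch of the difference (the variable name is just an example value):

```python
var_name = "my_scope/weights"  # example value for illustration

# Before: the placeholder is never filled in; the exception just carries a tuple.
err = ValueError('Variable %s wasnt found', var_name)
print(err.args)  # ('Variable %s wasnt found', 'my_scope/weights')
print(str(err))  # ('Variable %s wasnt found', 'my_scope/weights')

# After: the message is formatted before the exception is constructed.
err = ValueError("Variable %s wasn't found" % var_name)
print(str(err))  # Variable my_scope/weights wasn't found
```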
@@ -35,7 +35,7 @@ def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
 
   Args:
     sparse_tensor: a `SparseTensor`.
-    mask: a 1D boolean dense`Tensor` whose length is equal to the 0th dimension
+    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
       of `sparse_tensor`.
     name: optional name for this operation.
   Returns:
@@ -25,8 +25,7 @@ import six
 from tensorflow.contrib import framework as framework_lib
 from tensorflow.contrib import layers as layers_lib
 from tensorflow.contrib import lookup as lookup_lib
-# TODO(ptucker): Use tf.losses and tf.metrics.
-from tensorflow.contrib import losses as losses_lib
+# TODO(ptucker): Use tf.metrics.
 from tensorflow.contrib import metrics as metrics_lib
 from tensorflow.contrib.learn.python.learn.estimators import constants
 from tensorflow.contrib.learn.python.learn.estimators import model_fn
@@ -44,6 +43,7 @@ from tensorflow.python.ops import sparse_ops
 from tensorflow.python.ops import string_ops
 from tensorflow.python.ops import variable_scope
 from tensorflow.python.ops import weights_broadcast_ops
+from tensorflow.python.ops.losses import losses as losses_lib
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.summary import summary
 from tensorflow.python.training import training
@@ -1212,7 +1212,8 @@ class _BinarySvmHead(_SingleHead):
       with ops.name_scope(None, "hinge_loss", (logits, labels)) as name:
         with ops.control_dependencies((_assert_labels_rank(labels),)):
           labels = array_ops.reshape(labels, shape=(-1, 1))
-        loss = losses_lib.hinge_loss(logits=logits, labels=labels, scope=name)
+        loss = losses_lib.hinge_loss(labels=labels, logits=logits, scope=name,
+                                     reduction=losses_lib.Reduction.NONE)
         return _compute_weighted_loss(loss, weights)
 
     super(_BinarySvmHead, self).__init__(
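The hinge-loss change above switches to keyword arguments and asks the loss op for unreduced, per-example values (Reduction.NONE in tf.losses), since `_compute_weighted_loss` applies the example weights itself. A rough numpy illustration of why the weighting step needs per-example losses; the weighting formula here is a simplification for illustration, not the exact TF reduction:

```python
import numpy as np

def hinge_loss(labels, logits):
    """Per-example hinge loss; labels are in {0, 1} as in tf.losses.hinge_loss."""
    signs = 2.0 * labels - 1.0            # map {0, 1} -> {-1, +1}
    return np.maximum(0.0, 1.0 - signs * logits)

labels = np.array([1.0, 0.0, 1.0])
logits = np.array([0.3, -2.0, 1.5])
weights = np.array([1.0, 0.5, 2.0])       # hypothetical example weights

per_example = hinge_loss(labels, logits)  # unreduced, shape [3]
weighted = np.sum(per_example * weights) / np.sum(weights)
print(per_example, weighted)
```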
@@ -18,7 +18,7 @@ set -e
 
 # Make sure we're on OS X.
 if [[ $(uname) != "Darwin" ]]; then
-    echo "ERROR: This makefile build requires OS X, which the current system "\
+    echo "ERROR: This makefile build requires macOS, which the current system "\
     "is not."
    exit 1
 fi
@@ -37,7 +37,9 @@ rm -rf tensorflow/contrib/makefile/downloads
 #
 # ld: -bind_at_load and -bitcode_bundle (Xcode setting ENABLE_BITCODE=YES) cannot be used together
 #
-export MACOSX_DEPLOYMENT_TARGET="10.10"
+if [[ -n MACOSX_DEPLOYMENT_TARGET ]]; then
+    export MACOSX_DEPLOYMENT_TARGET=$(sw_vers -productVersion)
+fi
 
 # Pull down the required versions of the frameworks we need.
 tensorflow/contrib/makefile/download_dependencies.sh
@@ -48,6 +50,5 @@ tensorflow/contrib/makefile/compile_ios_protobuf.sh
 # Build the iOS TensorFlow libraries.
 tensorflow/contrib/makefile/compile_ios_tensorflow.sh "-O3"
 
 # Creates a static universal library in
 # tensorflow/contrib/makefile/gen/lib/libtensorflow-core.a
-
@@ -15,9 +15,12 @@
 # ==============================================================================
 # Builds protobuf 3 for iOS.
 
-set -x
 set -e
 
+if [[ -n MACOSX_DEPLOYMENT_TARGET ]]; then
+    export MACOSX_DEPLOYMENT_TARGET=$(sw_vers -productVersion)
+fi
+
 SCRIPT_DIR=$(dirname $0)
 source "${SCRIPT_DIR}/build_helper.subr"
 
@@ -31,6 +31,10 @@ function less_than_required_version() {
   )
 }
 
+if [[ -n MACOSX_DEPLOYMENT_TARGET ]]; then
+    export MACOSX_DEPLOYMENT_TARGET=$(sw_vers -productVersion)
+fi
+
 ACTUAL_XCODE_VERSION=$(xcodebuild -version | head -n 1 | sed 's/Xcode //')
 REQUIRED_XCODE_VERSION=7.3.0
 if less_than_required_version $ACTUAL_XCODE_VERSION 7 3 0
@@ -44,7 +48,7 @@ LIBDIR=${GENDIR}lib
 LIB_PREFIX=libtensorflow-core
 
 make -j"${JOB_COUNT}" -f tensorflow/contrib/makefile/Makefile \
 TARGET=IOS IOS_ARCH=ARMV7 LIB_NAME=${LIB_PREFIX}-armv7.a OPTFLAGS="$1"
 if [ $? -ne 0 ]
 then
   echo "armv7 compilation failed."
@@ -37,7 +37,7 @@ def _shuffle_to_front(input_tensor, k):
     k: A scalar `Tensor` specifying how many indices to shuffle.
 
   Returns:
-    A tranposed version of `input_tensor` with `k` indices shuffled to the
+    A transposed version of `input_tensor` with `k` indices shuffled to the
     front.
 
   Raises:
@@ -81,7 +81,7 @@ more summaries and call the evaluation_loop method:
 
   # Evaluate every 10 minutes:
   slim.evaluation_loop(
-      master='',
+      '',
       checkpoint_dir,
       logdir,
       num_evals=num_evals,
@@ -1,9 +1,9 @@
 # tfprof: TensorFlow Profiler and Beyond
 
 <h1>Please use `tf.profiler.xxx` instead of `tf.contrib.tfprof.xxx`</h1>
-<h1>Full Document in tensorflow/core/profiler/README.md<h1>
+<h1>Full Document in <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/profiler/README.md">tensorflow/core/profiler/README.md</a><h1>
 
-###Features
+### Features
 
 * Profile model architectures
   * parameters, tensor shapes, float operations, device placement, etc.
@@ -16,7 +16,7 @@
   * operation configuration check
   * distributed runtime check (Not OSS)
 
-###Interfaces
+### Interfaces
 
 * Python API
 * Command Line
@@ -353,7 +353,7 @@ class SequentialTimeSeriesModel(TimeSeriesModel):
 
     Args:
       current_times: A [batch size] Tensor of times for each observation.
-      current_values: A [batch size] Tensor of values for each observaiton.
+      current_values: A [batch size] Tensor of values for each observation.
       state: Model state, updated to current_times.
      predictions: The outputs of _prediction_step
    Returns:
@@ -391,7 +391,7 @@ class StateSpaceModel(model.SequentialTimeSeriesModel):
 
     Args:
       current_times: A [batch size] Tensor for times for each observation.
-      current_values: A [batch size] Tensor of values for each observaiton.
+      current_values: A [batch size] Tensor of values for each observation.
       state: A tuple of (mean, covariance, previous_times) having shapes
         mean; [batch size x state dimension]
         covariance; [batch size x state dimension x state dimension]
@@ -65,7 +65,7 @@ load(
     "full_path",
     "if_android",
     "if_ios",
-    "if_x86",
+    "if_linux_x86_64",
     "if_not_mobile",
     "if_not_windows",
     "tf_copts",
@@ -1379,7 +1379,7 @@ cc_library(
     name = "lib_hash_crc32c_accelerate_internal",
     srcs = ["lib/hash/crc32c_accelerate.cc"],
     # -msse4.2 enables the use of crc32c compiler builtins.
-    copts = tf_copts() + if_x86(["-msse4.2"]),
+    copts = tf_copts() + if_linux_x86_64(["-msse4.2"]),
 )
 
 cc_library(
@@ -183,4 +183,18 @@ limitations under the License.
 #define TF_CALL_QUANTIZED_TYPES(m) \
   TF_CALL_qint8(m) TF_CALL_quint8(m) TF_CALL_qint32(m)
 
+#ifdef TENSORFLOW_SYCL_NO_DOUBLE
+#define TF_CALL_SYCL_double(m)
+#else  // TENSORFLOW_SYCL_NO_DOUBLE
+#define TF_CALL_SYCL_double(m) TF_CALL_double(m)
+#endif  // TENSORFLOW_SYCL_NO_DOUBLE
+
+#ifdef __ANDROID_TYPES_SLIM__
+#define TF_CALL_SYCL_NUMBER_TYPES(m) TF_CALL_float(m)
+#else  // __ANDROID_TYPES_SLIM__
+#define TF_CALL_SYCL_NUMBER_TYPES(m) \
+  TF_CALL_float(m) \
+  TF_CALL_SYCL_double(m)
+#endif  // __ANDROID_TYPES_SLIM__
+
 #endif  // TENSORFLOW_FRAMEWORK_REGISTER_TYPES_H_
@@ -31,7 +31,7 @@ class BatchDatasetOp : public UnaryDatasetOpKernel {
 
   void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                    DatasetBase** output) override {
-    int64 batch_size;
+    int64 batch_size = 0;
     OP_REQUIRES_OK(ctx,
                    ParseScalarArgument<int64>(ctx, "batch_size", &batch_size));
     OP_REQUIRES(
@@ -36,7 +36,9 @@ REGISTER_KERNEL_BUILDER(Name("Add")
 
 
 #if TENSORFLOW_USE_SYCL
-REGISTER2(BinaryOp, SYCL, "Add", functor::add, float, double);
+#define REGISTER_KERNEL(type) REGISTER(BinaryOp, SYCL, "Add", functor::add, type);
+TF_CALL_SYCL_NUMBER_TYPES(REGISTER_KERNEL);
+
 REGISTER_KERNEL_BUILDER(Name("Add")
                             .Device(DEVICE_SYCL)
                             .HostMemory("x")
@@ -82,22 +82,42 @@ class WindowsEnv : public Env {
     return new StdThread(thread_options, name, fn);
   }
 
+  static VOID CALLBACK SchedClosureCallback(PTP_CALLBACK_INSTANCE Instance,
+                                            PVOID Context, PTP_WORK Work) {
+    CloseThreadpoolWork(Work);
+    std::function<void()>* f = (std::function<void()>*)Context;
+    (*f)();
+    delete f;
+  }
   void SchedClosure(std::function<void()> closure) override {
-    // TODO(b/27290852): Spawning a new thread here is wasteful, but
-    // needed to deal with the fact that many `closure` functions are
-    // blocking in the current codebase.
-    std::thread closure_thread(closure);
-    closure_thread.detach();
+    PTP_WORK work = CreateThreadpoolWork(
+        SchedClosureCallback, new std::function<void()>(std::move(closure)),
+        nullptr);
+    SubmitThreadpoolWork(work);
   }
 
+  static VOID CALLBACK SchedClosureAfterCallback(PTP_CALLBACK_INSTANCE Instance,
+                                                 PVOID Context,
+                                                 PTP_TIMER Timer) {
+    CloseThreadpoolTimer(Timer);
+    std::function<void()>* f = (std::function<void()>*)Context;
+    (*f)();
+    delete f;
+  }
+
   void SchedClosureAfter(int64 micros, std::function<void()> closure) override {
-    // TODO(b/27290852): Consuming a thread here is wasteful, but this
-    // code is (currently) only used in the case where a step fails
-    // (AbortStep). This could be replaced by a timer thread
-    SchedClosure([this, micros, closure]() {
-      SleepForMicroseconds(micros);
-      closure();
-    });
+    PTP_TIMER timer = CreateThreadpoolTimer(
+        SchedClosureAfterCallback,
+        new std::function<void()>(std::move(closure)), nullptr);
+    // in 100 nanosecond units
+    FILETIME FileDueTime;
+    ULARGE_INTEGER ulDueTime;
+    // Negative indicates the amount of time to wait is relative to the current
+    // time.
+    ulDueTime.QuadPart = (ULONGLONG) - (10 * micros);
+    FileDueTime.dwHighDateTime = ulDueTime.HighPart;
+    FileDueTime.dwLowDateTime = ulDueTime.LowPart;
+    SetThreadpoolTimer(timer, &FileDueTime, 0, 0);
   }
 
   Status LoadLibrary(const char *library_filename, void** handle) override {
@@ -24,7 +24,7 @@ opts = ProfileOptionBuilder(
     ).build()
 param_stats = tf.profiler.profile(
     tf.get_default_graph(),
-    cmd='code'
+    cmd='code',
     options=opts)
 
 # param_stats can be tensorflow.tfprof.GraphNodeProto or
@@ -5,7 +5,7 @@ TensorFlow is a fast moving project. In order for the community to better
 understand what the near future will bring, this document shares what we are
 working on internally. Many of these features were requested by the community,
 and we welcome
-[contributions](https://github.com/tensorflow/tensorflow/labels/contributions%20welcome).
+[contributions](https://github.com/tensorflow/tensorflow/labels/stat%3Acontributions%20welcome).
 
 The features on this list are targeted for the next few months. At this point,
 we do not have timelines for these features.
@@ -155,7 +155,7 @@ REGISTER_KERNEL_BUILDER(Name("ZeroOut").Device(DEVICE_CPU), ZeroOutOp);
 ### Multi-threaded CPU kernels
 
 To write a multi-threaded CPU kernel, the Shard function in
-[`work_sharder.h`](https://www.tensorflow.org/code/tensorflow/core/framework/work_sharder.h)
+[`work_sharder.h`](https://www.tensorflow.org/code/tensorflow/core/util/work_sharder.h)
 can be used. This function shards a computation function across the
 threads configured to be used for intra-op threading (see
 intra_op_parallelism_threads in
@@ -168,7 +168,7 @@ and so determine which request types will be honored.
 > the corresponding `ExportOutput` entry. The inputs are always those provided
 > by the `serving_input_receiver_fn`.
 > An inference request may specify the head by name. One head must be named
-> using [`signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`](https://www.tensorflow.org/code/saved_model/signature_constants.py)
+> using [`signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`](https://www.tensorflow.org/code/tensorflow/python/saved_model/signature_constants.py)
 > indicating which signature will be served when an inference request does not
 > specify one.
 
@@ -295,4 +295,3 @@ the different Tensorflow Serving APIs, selecting the signature by key, etc. -->
 
 <!-- TODO(soergel): document ExportStrategy here once Experiment moves
 from contrib to core. -->
-
@@ -32,8 +32,8 @@ tensor's **rank** is its number of dimensions. Here are some examples of
 tensors:
 
 ```python
-3 # a rank 0 tensor; this is a scalar with shape []
-[1., 2., 3.] # a rank 1 tensor; this is a vector with shape [3]
+3 # a rank 0 tensor; a scalar with shape []
+[1., 2., 3.] # a rank 1 tensor; a vector with shape [3]
 [[1., 2., 3.], [4., 5., 6.]] # a rank 2 tensor; a matrix with shape [2, 3]
 [[[1., 2., 3.]], [[7., 8., 9.]]] # a rank 3 tensor with shape [2, 1, 3]
 ```
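For readers who want to check the rank/shape wording in that hunk, the same examples can be probed with numpy; numpy is only a stand-in for inspecting shapes here, while the tutorial itself uses TensorFlow:

```python
import numpy as np

print(np.array(3).ndim, np.array(3).shape)               # 0 ()
print(np.array([1., 2., 3.]).shape)                      # (3,)
print(np.array([[1., 2., 3.], [4., 5., 6.]]).shape)      # (2, 3)
print(np.array([[[1., 2., 3.]], [[7., 8., 9.]]]).shape)  # (2, 1, 3)
```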
@@ -181,7 +181,7 @@ initial value:
 W = tf.Variable([.3], dtype=tf.float32)
 b = tf.Variable([-.3], dtype=tf.float32)
 x = tf.placeholder(tf.float32)
-linear_model = W * x + b
+linear_model = W*x + b
 ```
 
 Constants are initialized when you call `tf.constant`, and their value can never
@@ -302,7 +302,7 @@ W = tf.Variable([.3], dtype=tf.float32)
 b = tf.Variable([-.3], dtype=tf.float32)
 # Model input and output
 x = tf.placeholder(tf.float32)
-linear_model = W * x + b
+linear_model = W*x + b
 y = tf.placeholder(tf.float32)
 
 # loss
@@ -330,9 +330,9 @@ When run, it produces
 W: [-0.9999969] b: [ 0.99999082] loss: 5.69997e-11
 ```
 
-Notice that the loss is a very small number (very close to zero). If you run this
-program, your loss may not be the exact same because the model is initialized
-with pseudorandom values.
+Notice that the loss is a very small number (very close to zero). If you run
+this program, your loss may not be exactly the same as the aforementioned loss
+because the model is initialized with pseudorandom values.
 
 This more complicated program can still be visualized in TensorBoard
 
@@ -426,7 +426,7 @@ def model_fn(features, labels, mode):
   # Build a linear model and predict values
   W = tf.get_variable("W", [1], dtype=tf.float64)
   b = tf.get_variable("b", [1], dtype=tf.float64)
-  y = W * features['x'] + b
+  y = W*features['x'] + b
   # Loss sub-graph
   loss = tf.reduce_sum(tf.square(y - labels))
   # Training sub-graph
@@ -17,7 +17,7 @@ TensorBoard is fully configured, it looks like this:
 </div>
 
 This tutorial is intended to get you started with simple TensorBoard usage.
-There are other resources available as well! The [TensorBoard README](https://www.tensorflow.org/code/tensorflow/tensorboard/README.md)
+There are other resources available as well! The [TensorBoard's GitHub](https://github.com/tensorflow/tensorboard)
 has a lot more information on TensorBoard usage, including tips & tricks, and
 debugging information.
 
@@ -216,5 +216,4 @@ corner. Each tab represents a set of serialized data that can be visualized.
 For in depth information on how to use the *graph* tab to visualize your graph,
 see @{$graph_viz$TensorBoard: Graph Visualization}.
 
-For more usage information on TensorBoard in general, see the [TensorBoard
-README](https://www.tensorflow.org/code/tensorflow/tensorboard/README.md).
+For more usage information on TensorBoard in general, see the [TensorBoard's GitHub](https://github.com/tensorflow/tensorboard).
@@ -447,7 +447,7 @@ dataset = tf.contrib.data.Dataset.from_tensor_slices(filenames)
 # * Filter out lines beginning with "#" (comments).
 dataset = dataset.flat_map(
     lambda filename: (
-        tf.contrib.data.Dataset.TextLineDataset(filename)
+        tf.contrib.data.TextLineDataset(filename)
         .skip(1)
         .filter(lambda line: tf.not_equal(tf.substr(line, 0, 1), "#"))))
 ```
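The pipeline in that hunk (skip each file's header line, drop comment lines) can be mimicked with ordinary Python iterators; this is only an analogy for what the Dataset transformations do, not the tf.contrib.data API itself:

```python
def text_lines(filename):
    """Yield lines of a file without trailing newlines (stand-in for TextLineDataset)."""
    with open(filename) as f:
        for line in f:
            yield line.rstrip("\n")

def parse_file(filename):
    lines = text_lines(filename)
    next(lines, None)  # .skip(1): drop the header row
    return (line for line in lines if not line.startswith("#"))  # .filter(...): drop comments

# records = [r for fname in filenames for r in parse_file(fname)]  # rough flat_map equivalent
```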
@@ -137,7 +137,7 @@ Once TensorBoard is running, navigate your web browser to `localhost:6006` to vi
 
 The script will log TensorBoard summaries to `/tmp/retrain_logs` by default. You can change the directory with the `--summaries_dir` flag.
 
-The [TensorBoard README](https://www.tensorflow.org/code/tensorflow/tensorboard/README.md) has a lot more information on TensorBoard usage, including tips & tricks, and debugging information.
+The [TensorBoard's GitHub](https://github.com/tensorflow/tensorboard) has a lot more information on TensorBoard usage, including tips & tricks, and debugging information.
 
 ## Using the Retrained Model
 
@@ -337,7 +337,7 @@ the (much larger) training set.
 
 By default the script uses a pretrained version of the Inception v3 model
 architecture. This is a good place to start because it provides high accuracy
 results, but if you intend to deploy your model on mobile devices or other
 resource-constrained environments you may want to trade off a little accuracy
 for much smaller file sizes or faster speeds. To help with that, the
 [retrain.py script](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py)
|
@ -87,7 +87,7 @@ education = tf.feature_column.categorical_column_with_vocabulary_list(
|
|||||||
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
|
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
|
||||||
"Preschool", "12th"
|
"Preschool", "12th"
|
||||||
])
|
])
|
||||||
tf.feature_column.categorical_column_with_vocabulary_list(
|
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
|
||||||
"marital_status", [
|
"marital_status", [
|
||||||
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
|
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
|
||||||
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
|
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
|
||||||
|
@@ -253,7 +253,7 @@ try:
   from sklearn.manifold import TSNE
   import matplotlib.pyplot as plt
 
-  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
+  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
   plot_only = 500
   low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
   labels = [reverse_dictionary[i] for i in xrange(plot_only)]
@@ -806,7 +806,7 @@
    "source": [
     "num_points = 400\n",
     "\n",
-    "tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)\n",
+    "tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')\n",
     "two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])"
    ],
    "outputs": [],
|
@ -22,7 +22,10 @@ java_library(
|
|||||||
# .aar. At some point, might make sense for a .aar rule here instead.
|
# .aar. At some point, might make sense for a .aar rule here instead.
|
||||||
filegroup(
|
filegroup(
|
||||||
name = "java_sources",
|
name = "java_sources",
|
||||||
srcs = glob(["src/main/java/org/tensorflow/*.java"]),
|
srcs = glob([
|
||||||
|
"src/main/java/org/tensorflow/*.java",
|
||||||
|
"src/main/java/org/tensorflow/types/*.java",
|
||||||
|
]),
|
||||||
visibility = [
|
visibility = [
|
||||||
"//tensorflow/contrib/android:__pkg__",
|
"//tensorflow/contrib/android:__pkg__",
|
||||||
"//tensorflow/java:__pkg__",
|
"//tensorflow/java:__pkg__",
|
||||||
|
tensorflow/java/src/gen/perl/tftypes-runall.pl (new file, 40 lines)
@@ -0,0 +1,40 @@
+#!/usr/bin/perl
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+my $script = $0;
+my $dir = `dirname $script`;
+chomp $dir;
+my $gen = "$dir/..";
+my $tfjavasrc = "$gen/..";
+my $rsrc = "$gen/resources";
+my $root = "$tfjavasrc/main/java";
+my $pkg = "$root/org/tensorflow";
+
+sub locchk {
+    (my $f) = @_;
+    if (! -r $f) {
+        print STDERR "Script tftypes-runall seems to be located in the wrong place (could not find $f)\n";
+        exit 1;
+    }
+}
+&locchk("$gen");
+&locchk("$tfjavasrc/gen");
+&locchk("$dir/tftypes.pl");
+&locchk("$rsrc/tftypes.csv");
+
+system("perl $dir/tftypes.pl -t $rsrc/tftypes.csv $pkg/types");
+# system("perl $dir/tftypes.pl -c $rsrc/tftypes.csv $rsrc/Tensors.java.tmpl > $pkg/op/Tensors.java");
tensorflow/java/src/gen/perl/tftypes.pl (new file, 157 lines)
@@ -0,0 +1,157 @@
+#!/usr/bin/perl
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+
+use strict;
+
+my $copyright =
+'/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+';
+
+my $count;
+
+my $option = '-t', my $template;
+
+sub usage {
+    print "Usage: tftypes [-ctdT] <type desc file> <tmpl file>\n\n"
+        ."This script generates parts of various .java files that depend on which"
+        ."TensorFlow types are supported by the Java API and how much. For each"
+        ."such .java file, there is a .tmpl file in the same source directory in"
+        ."which the strings \@TYPEINFO\@ and \@IMPORTS\@ are replaced with"
+        ."appropriate Java code. Output code is sent to standard output.\n\n";
+
+    print "Modulo putting in the correct directory names, it can be invoked as follows:\n";
+    print "tftypes -c tftypes.csv Tensors.java.tmpl > Tensors.java\n";
+    print "tftypes -t tftypes.csv <dir> [outputs files to dir]\n";
+}
+
+if ($ARGV[0] =~ m/^-/) {
+    $option = shift;
+}
+my $typedesc = shift;
+my $tmpl = shift;
+
+my $dirname;
+
+if ($option eq '-t') {
+    $dirname = $tmpl;
+}
+
+open (TMPL, "<$tmpl") || die "Cannot open $tmpl for reading\n";
+
+my $text = do { local $/; <TMPL> };
+
+my %jtypecount;
+
+my $typeinfo, my $imports;
+
+open (TYPEDESC, $typedesc);
+
+my @info = ([]);
+
+while (<TYPEDESC>) {
+    chomp;
+    my $line = $_;
+    if ($line =~ m/^TF type/) { next }
+    $line =~ s/\r$//;
+    (my $name, my $jtype, my $creat, my $default, my $desc) =
+        split /,/, $line, 5;
+    $desc =~ s/^ *//g;
+    $desc =~ s/ *$//g;
+    $jtypecount{$jtype}++;
+    if ($jtypecount{$jtype} > 1) {
+        # currently allowing Java types to stand for more than one TF type, but
+        # may want to revisit this.
+        # print STDERR "Ambiguous Java type for $name : $jtype\n";
+        # exit 1
+    }
+
+    push @info, [$name, $jtype, $creat, $default, $desc];
+}
+
+for (my $i = 1; $i <= $#info; $i++) {
+    (my $name, my $jtype, my $creat, my $default, my $desc) =
+        @{$info[$i]};
+    my $tfname = "TF".$name;
+    my $ucname = uc $name;
+
+    if ($option eq '-t') {
+        if ($jtype eq '') { next }
+        # Generate class declarations
+        # print STDERR "Creating $dirname/$tfname.java\n";
+        open (CLASSFILE, ">$dirname/$tfname.java") || die "Can't open $tfname.java";
+        print CLASSFILE $copyright;
+        print CLASSFILE "// GENERATED FILE. To update, edit tftypes.pl instead.\n\n";
+
+        my $fulldesc = $desc;
+        if (substr($desc, 0, 1) =~ m/^[aeoiu8]$/i) {
+            $fulldesc = "an $desc"
+        } else {
+            $fulldesc = "a $desc"
+        }
+        print CLASSFILE "package org.tensorflow.types;\n\n"
+            ."import org.tensorflow.DataType;\n\n";
+        print CLASSFILE "/** Represents $fulldesc. */\n"
+            ."public class $tfname implements TFType {\n"
+            ."  private $tfname() {}\n"
+            ."  static {\n"
+            ."    Types.typeCodes.put($tfname.class, DataType.$ucname);\n"
+            ."  }\n";
+        if ($default ne '') {
+            print CLASSFILE
+                "  static {\n"
+                ."    Types.scalars.put($tfname.class, $default);\n"
+                ."  }\n";
+        }
+        print CLASSFILE "}\n";
+        close(CLASSFILE);
+    } elsif ($option eq '-c') {
+        # Generate creator declarations for Tensors.java
+        if ($jtype ne '' && $creat eq 'y') {
+            for (my $brackets = ''; length $brackets <= 12; $brackets .= '[]') {
+                $typeinfo .=
+                    "  public static Tensor<$tfname> create($jtype$brackets data) {\n"
+                    ."    return Tensor.create(data, $tfname.class);\n"
+                    ."  }\n";
+            }
+        }
+        if ($text =~ m/\b$tfname\b/ || $creat eq 'y') {
+            $imports .= "import org.tensorflow.types.$tfname;\n";
+        }
+    }
+}
+
+if ($option ne '-t') {
+    print "// GENERATED FILE. Edits to this file will be lost -- edit $tmpl instead.\n";
+
+    $text =~ s/\@TYPEINFO\@/$typeinfo/;
+    $text =~ s/\@IMPORTS\@/$imports/;
+
+    print $text;
+}
tensorflow/java/src/gen/resources/tftypes.csv (new file, 21 lines)
@@ -0,0 +1,21 @@
+TF type,Java type,Creator?,Zero value,Description
+Float,float,y,0f,32-bit single precision floating point number
+Double,double,y,0.0,64-bit double precision floating point number
+Int32,int,y,0,32-bit signed integer
+UInt8,byte,n,(byte)0,8-bit unsigned integer
+Int16,,n,(short)0,16-bit signed integer
+Int8,,n,(byte)0,8-bit signed integer
+String,byte,n,,arbitrary sequence of bytes
+Complex64,,n,,single-precision complex number
+Int64,long,y,0L,64-bit signed integer
+Bool,boolean,y,false,boolean
+QInt8,,n,,quantized int8
+QUInt8,,n,,quantized uint8
+QInt32,,n,,quantized int32
+BFloat16,,n,,float32 truncated to 16 bits. Only for cast ops.
+QInt16,,n,,quantized int16
+QUInt16,,n,,quantized uint16
+UInt16,,n,,16-bit unsigned integer
+Complex128,,n,,double-precision complex number
+Half,,n,,
+Resource,,n,,
@@ -53,12 +53,14 @@ public enum DataType {
   int c() {
     return value;
   }
 
+  // Cached to avoid copying it
+  final private static DataType[] values = values();
+
   static DataType fromC(int c) {
-    for (DataType t : DataType.values()) {
-      if (t.c() == c) {
+    for (DataType t : values) {
+      if (t.value == c)
         return t;
-      }
     }
     throw new IllegalArgumentException(
         "DataType " + c + " is not recognized in Java (version " + TensorFlow.version() + ")");
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a boolean. */
+public class TFBool implements TFType {
+  private TFBool() {}
+  static {
+    Types.typeCodes.put(TFBool.class, DataType.BOOL);
+  }
+  static {
+    Types.scalars.put(TFBool.class, false);
+  }
+}
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a 64-bit double precision floating point number. */
+public class TFDouble implements TFType {
+  private TFDouble() {}
+  static {
+    Types.typeCodes.put(TFDouble.class, DataType.DOUBLE);
+  }
+  static {
+    Types.scalars.put(TFDouble.class, 0.0);
+  }
+}
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a 32-bit single precision floating point number. */
+public class TFFloat implements TFType {
+  private TFFloat() {}
+  static {
+    Types.typeCodes.put(TFFloat.class, DataType.FLOAT);
+  }
+  static {
+    Types.scalars.put(TFFloat.class, 0f);
+  }
+}
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a 32-bit signed integer. */
+public class TFInt32 implements TFType {
+  private TFInt32() {}
+  static {
+    Types.typeCodes.put(TFInt32.class, DataType.INT32);
+  }
+  static {
+    Types.scalars.put(TFInt32.class, 0);
+  }
+}
@@ -0,0 +1,30 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents a 64-bit signed integer. */
+public class TFInt64 implements TFType {
+  private TFInt64() {}
+  static {
+    Types.typeCodes.put(TFInt64.class, DataType.INT64);
+  }
+  static {
+    Types.scalars.put(TFInt64.class, 0L);
+  }
+}
@@ -0,0 +1,27 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+// GENERATED FILE. To update, edit tftypes.pl instead.
+
+package org.tensorflow.types;
+
+import org.tensorflow.DataType;
+
+/** Represents an arbitrary sequence of bytes. */
+public class TFString implements TFType {
+  private TFString() {}
+  static {
+    Types.typeCodes.put(TFString.class, DataType.STRING);
+  }
+}
@ -0,0 +1,20 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.types;

/**
 * A marker interface for classes representing TensorFlow types.
 */
public interface TFType {}
@ -0,0 +1,30 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// GENERATED FILE. To update, edit tftypes.pl instead.

package org.tensorflow.types;

import org.tensorflow.DataType;

/** Represents an 8-bit unsigned integer. */
public class TFUInt8 implements TFType {
  private TFUInt8() {}

  static {
    Types.typeCodes.put(TFUInt8.class, DataType.UINT8);
  }

  static {
    Types.scalars.put(TFUInt8.class, (byte)0);
  }
}
@ -0,0 +1,52 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.types;

import java.util.HashMap;
import java.util.Map;
import org.tensorflow.DataType;

/**
 * Utility class for managing the representation of TensorFlow types as Java
 * types. For each TensorFlow type (e.g., int32), there is a corresponding Java
 * type (e.g., TFInt32) that represents it at compile time and a corresponding
 * class object (e.g., TFInt32.class) that represents it at run time. There is
 * also an enumeration value in DataType that can be used to represent the
 * type, though that should rarely be required.
 */
public class Types {

  private Types() {} // not instantiable

  static final Map<Class<?>, DataType> typeCodes = new HashMap<>();

  /** Returns the DataType value corresponding to a TensorFlow type class. */
  public static DataType dataType(Class<? extends TFType> c) {
    DataType dtype = typeCodes.get(c);
    if (dtype == null) {
      throw new IllegalArgumentException("" + c + " is not a TensorFlow type.");
    }
    return dtype;
  }

  static final Map<Class<?>, Object> scalars = new HashMap<>();

  /** Returns the zero value of type described by {@code c}, or null if
   * the type (e.g., string) is not numeric and therefore has no zero value.
   */
  public static Object zeroValue(Class<? extends TFType> c) {
    return scalars.get(c);
  }
}
@ -0,0 +1,27 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

/**
 * Defines classes that represent TensorFlow data types. For each possible data type
 * that can be used in a tensor, there is a corresponding class in this package that
 * is used to represent it. For example, the TensorFlow int32 type is represented by
 * the type TFInt32 and by the class object TFInt32.class. The former is used to
 * support compile-time checking of tensor data types and the latter is used for
 * run-time checking of data types. All such classes implement the TFType interface.
 * TensorFlow data types are also separately represented by the DataType enum, with
 * one enum value per data type. The enum representation should rarely be needed, but
 * the Types class can be used to obtain it from the class object representation.
 */
package org.tensorflow.types;
@ -24,6 +24,7 @@ import static org.junit.Assert.fail;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@ -303,7 +303,7 @@ class GradientsDebugger(object):
    """Register the gradient tensor for an x-tensor.

    Args:
-      x_tensor_name: (`str`) the name of the the independent `tf.Tensor`, i.e.,
+      x_tensor_name: (`str`) the name of the independent `tf.Tensor`, i.e.,
        the tensor on the denominator of the differentiation.
      gradient_tensor: the gradient `tf.Tensor`.
    """
@ -78,7 +78,7 @@ class EstimatorSpec(

  Depending on the value of `mode`, different arguments are required. Namely
  * For `mode == ModeKeys.TRAIN`: required fields are `loss` and `train_op`.
- * For `mode == ModeKeys.EVAL`: required field is`loss`.
+ * For `mode == ModeKeys.EVAL`: required field is `loss`.
  * For `mode == ModeKeys.PREDICT`: required fields are `predictions`.

  model_fn can populate all arguments independent of mode. In this case, some
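
To make the required-field rules above concrete, here is a minimal model_fn sketch (illustration only, not part of this commit; it assumes the tf.estimator API of this TensorFlow line, and the feature key "x", layer, and optimizer choices are arbitrary):

import tensorflow as tf

def model_fn(features, labels, mode):
  # A deliberately tiny model; everything below is an illustrative assumption,
  # not taken from the diff above.
  logits = tf.layers.dense(features["x"], units=1)

  if mode == tf.estimator.ModeKeys.PREDICT:
    # PREDICT: only `predictions` is required.
    return tf.estimator.EstimatorSpec(mode, predictions={"logits": logits})

  loss = tf.losses.mean_squared_error(labels, logits)
  if mode == tf.estimator.ModeKeys.EVAL:
    # EVAL: only `loss` is required.
    return tf.estimator.EstimatorSpec(mode, loss=loss)

  # TRAIN: both `loss` and `train_op` are required.
  train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
      loss, global_step=tf.train.get_global_step())
  return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
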
@ -617,7 +617,7 @@ def call_cpp_shape_fn(op, require_shape_fn=True):

def _call_cpp_shape_fn_impl(
    op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn):
-  """Core implementaton of call_cpp_shape_fn."""
+  """Core implementation of call_cpp_shape_fn."""
  graph_def_version = op.graph.graph_def_versions.producer
  node_def_str = op.node_def.SerializeToString()

@ -29,12 +29,12 @@ class ArgMaxTest(test.TestCase):
  def _testArg(self,
               method,
               x,
-               dimension,
+               axis,
               expected_values,
               use_gpu=False,
               expected_err_re=None):
    with self.test_session(use_gpu=use_gpu):
-      ans = method(x, dimension=dimension)
+      ans = method(x, axis=axis)
      if expected_err_re is None:
        tf_ans = ans.eval()
        # Defaults to int64 output.
@ -48,27 +48,26 @@ class ArgMaxTest(test.TestCase):
  def _testBothArg(self,
                   method,
                   x,
-                   dimension,
+                   axis,
                   expected_values,
                   expected_err_re=None):
-    self._testArg(method, x, dimension, expected_values, True, expected_err_re)
-    self._testArg(method, x, dimension, expected_values, False, expected_err_re)
+    self._testArg(method, x, axis, expected_values, True, expected_err_re)
+    self._testArg(method, x, axis, expected_values, False, expected_err_re)

  def _testBasic(self, dtype):
    x = np.asarray(100 * np.random.randn(200), dtype=dtype)

-    # Check that argmin and argmax match numpy along the primary
-    # dimension
+    # Check that argmin and argmax match numpy along the primary axis
    self._testBothArg(math_ops.argmax, x, 0, x.argmax())
    self._testBothArg(math_ops.argmin, x, 0, x.argmin())

  def _testDim(self, dtype):
    x = np.asarray(100 * np.random.randn(3, 2, 4, 5, 6), dtype=dtype)

-    # Check that argmin and argmax match numpy along all dimensions
-    for dim in range(-5, 5):
-      self._testBothArg(math_ops.argmax, x, dim, x.argmax(dim))
-      self._testBothArg(math_ops.argmin, x, dim, x.argmin(dim))
+    # Check that argmin and argmax match numpy along all axes
+    for axis in range(-5, 5):
+      self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
+      self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))

  def testFloat(self):
    self._testBasic(np.float32)
|
|||||||
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
|
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
|
||||||
expected_values = x.argmax()
|
expected_values = x.argmax()
|
||||||
with self.test_session(use_gpu=True):
|
with self.test_session(use_gpu=True):
|
||||||
ans = math_ops.argmax(x, dimension=0, output_type=dtypes.int32)
|
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
|
||||||
tf_ans = ans.eval()
|
tf_ans = ans.eval()
|
||||||
self.assertEqual(np.int32, tf_ans.dtype)
|
self.assertEqual(np.int32, tf_ans.dtype)
|
||||||
# The values are equal when comparing int32 to int64 because
|
# The values are equal when comparing int32 to int64 because
|
||||||
@ -86,7 +85,7 @@ class ArgMaxTest(test.TestCase):
      self.assertAllEqual(tf_ans, expected_values)
    expected_values = x.argmin()
    with self.test_session(use_gpu=True):
-      ans = math_ops.argmin(x, dimension=0, output_type=dtypes.int32)
+      ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
      tf_ans = ans.eval()
      self.assertEqual(np.int32, tf_ans.dtype)
      self.assertAllEqual(tf_ans, expected_values)
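
The dimension-to-axis rename in these tests mirrors the public argmax/argmin signature. A small sketch of the renamed keyword (illustration only, assuming the TF 1.x Python API):

import numpy as np
import tensorflow as tf

x = np.array([[1, 9, 3],
              [7, 2, 5]], dtype=np.float32)

# `axis` replaces the old `dimension` keyword; `output_type` selects the
# index dtype (int64 by default).
idx = tf.argmax(x, axis=1, output_type=tf.int32)

with tf.Session() as sess:
  print(sess.run(idx))  # [1 0]
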
@ -537,7 +537,7 @@ class Layer(object):
        if x.get_shape().ndims is None:
          raise ValueError('Input ' + str(input_index) + ' of layer ' +
                           self.name + ' is incompatible with the layer: '
-                           'its rank is undefined, by the layer requires a '
+                           'its rank is undefined, but the layer requires a '
                           'defined rank.')

        # Check ndim.
@ -512,8 +512,9 @@ class QueueBase(object):
    the given queue. Subsequent `enqueue` and `enqueue_many`
    operations will fail. Subsequent `dequeue` and `dequeue_many`
    operations will continue to succeed if sufficient elements remain
-    in the queue. Subsequent `dequeue` and `dequeue_many` operations
-    that would block will fail immediately.
+    in the queue. Subsequently dequeue and dequeue_many operations
+    that would otherwise block waiting for more elements (if close
+    hadn't been called) will now fail immediately.

    If `cancel_pending_enqueues` is `True`, all pending requests will also
    be canceled.
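
The close() behaviour described in that docstring can be demonstrated with a small FIFOQueue sketch (illustration only, assuming the TF 1.x queue API):

import tensorflow as tf

q = tf.FIFOQueue(capacity=3, dtypes=tf.int32)
enqueue = q.enqueue_many(([1, 2],))
close = q.close()
dequeue = q.dequeue()

with tf.Session() as sess:
  sess.run(enqueue)
  sess.run(close)           # no further enqueues are accepted after this
  print(sess.run(dequeue))  # 1: elements already in the queue still dequeue
  print(sess.run(dequeue))  # 2
  try:
    sess.run(dequeue)       # queue is now empty and closed
  except tf.errors.OutOfRangeError:
    print("queue closed and empty")
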
@ -1110,7 +1110,7 @@ def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
  Args:
    sp_ids: A single `SparseTensor` with `values` property of type `int32`
      or `int64` or a Python list of such `SparseTensor`s or a list thereof.
-    sp_values: A`SparseTensor` of any type.
+    sp_values: A `SparseTensor` of any type.
    vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
      of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
      Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
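
For context, sparse_merge combines an id SparseTensor and a value SparseTensor into one SparseTensor indexed by the ids. A small sketch (illustration only, not part of the diff, assuming the TF 1.x API where this is exposed as tf.sparse_merge):

import tensorflow as tf

# Each (row, position) pair carries an id and a matching value.
indices = [[0, 0], [1, 0], [1, 1]]
sp_ids = tf.SparseTensor(indices=indices, values=[2, 0, 3], dense_shape=[2, 2])
sp_values = tf.SparseTensor(indices=indices, values=[1.0, 5.0, 7.0],
                            dense_shape=[2, 2])

# Result has shape [2, vocab_size]; row 0 gets 1.0 in column 2,
# row 1 gets 5.0 in column 0 and 7.0 in column 3.
merged = tf.sparse_merge(sp_ids, sp_values, vocab_size=4)

with tf.Session() as sess:
  print(sess.run(merged))
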
@ -115,10 +115,15 @@ def freeze_graph_with_def_protos(
        output_node_names.split(","),
        variable_names_blacklist=variable_names_blacklist)

-  with gfile.GFile(output_graph, "wb") as f:
-    f.write(output_graph_def.SerializeToString())
+  # Write GraphDef to file if output path has been given.
+  if output_graph:
+    with gfile.GFile(output_graph, "wb") as f:
+      f.write(output_graph_def.SerializeToString())
+
  print("%d ops in the final graph." % len(output_graph_def.node))

+  return output_graph_def
+

def _parse_input_graph_proto(input_graph, input_binary):
  """Parser input tensorflow graph into GraphDef proto."""
@ -64,11 +64,13 @@ if __name__ == "__main__":
      "--model_dir",
      type=str,
      default="",
+      required=True,
      help="The location of the protobuf (\'pb\') model to visualize.")
  parser.add_argument(
      "--log_dir",
      type=str,
      default="",
+      required=True,
      help="The location for the Tensorboard log to begin visualization from.")
  FLAGS, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
@ -271,7 +271,8 @@ class OptimizeForInferenceTest(test.TestCase):

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
-      self.assertNotEqual("ResizeBilinear", node.op)
+      self.assertNotEqual("MirrorPad", node.op)
+      self.assertNotEqual("ResizeBilinear", node.op)

  def testFusePadAndConv(self):
    with self.test_session() as sess:
@ -299,7 +300,7 @@ class OptimizeForInferenceTest(test.TestCase):

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
-      self.assertNotEqual("MirrorPad", node.op)
+      self.assertNotEqual("ResizeBilinear", node.op)


if __name__ == "__main__":
@ -117,11 +117,9 @@ def if_not_windows(a):
  })


-def if_x86(a):
+def if_linux_x86_64(a):
  return select({
      clean_dep("//tensorflow:linux_x86_64"): a,
-      clean_dep("//tensorflow:windows"): a,
-      clean_dep("//tensorflow:windows_msvc"): a,
      "//conditions:default": [],
  })

||||||
@ -152,7 +150,7 @@ def tf_copts():
|
|||||||
"-Wno-sign-compare",
|
"-Wno-sign-compare",
|
||||||
"-fno-exceptions",
|
"-fno-exceptions",
|
||||||
]) + if_cuda(["-DGOOGLE_CUDA=1"]) + if_mkl(["-DINTEL_MKL=1", "-fopenmp",]) + if_android_arm(
|
]) + if_cuda(["-DGOOGLE_CUDA=1"]) + if_mkl(["-DINTEL_MKL=1", "-fopenmp",]) + if_android_arm(
|
||||||
["-mfpu=neon"]) + if_x86(["-msse3"]) + select({
|
["-mfpu=neon"]) + if_linux_x86_64(["-msse3"]) + select({
|
||||||
clean_dep("//tensorflow:android"): [
|
clean_dep("//tensorflow:android"): [
|
||||||
"-std=c++11",
|
"-std=c++11",
|
||||||
"-DTF_LEAN_BINARY",
|
"-DTF_LEAN_BINARY",
|
||||||
|
@ -418,6 +418,10 @@ do_pip_smoke_test() {
    "The pip smoke test failed."
}

+do_code_link_check() {
+  tensorflow/tools/ci_build/code_link_check.sh
+}
+
do_check_load_py_test() {
  BUILD_CMD="bazel build //tensorflow/tools/pip_package:check_load_py_test"
  ${BUILD_CMD}
@ -431,8 +435,8 @@ do_check_load_py_test() {
}

# Supply all sanity step commands and descriptions
-SANITY_STEPS=("do_pylint PYTHON2" "do_pylint PYTHON3" "do_buildifier" "do_bazel_nobuild" "do_pip_package_licenses_check" "do_lib_package_licenses_check" "do_java_package_licenses_check" "do_pip_smoke_test" "do_check_load_py_test")
+SANITY_STEPS=("do_pylint PYTHON2" "do_pylint PYTHON3" "do_buildifier" "do_bazel_nobuild" "do_pip_package_licenses_check" "do_lib_package_licenses_check" "do_java_package_licenses_check" "do_pip_smoke_test" "do_check_load_py_test" "do_code_link_check")
-SANITY_STEPS_DESC=("Python 2 pylint" "Python 3 pylint" "buildifier check" "bazel nobuild" "pip: license check for external dependencies" "C library: license check for external dependencies" "Java Native Library: license check for external dependencies" "Pip Smoke Test: Checking py_test dependencies exist in pip package" "Check load py_test: Check that BUILD files with py_test target properly load py_test")
+SANITY_STEPS_DESC=("Python 2 pylint" "Python 3 pylint" "buildifier check" "bazel nobuild" "pip: license check for external dependencies" "C library: license check for external dependencies" "Java Native Library: license check for external dependencies" "Pip Smoke Test: Checking py_test dependencies exist in pip package" "Check load py_test: Check that BUILD files with py_test target properly load py_test" "Code Link Check: Check there are no broken links")

INCREMENTAL_FLAG=""

tensorflow/tools/ci_build/code_link_check.sh (new executable file, 42 lines)
@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# please run this at root directory of tensorflow
success=1

for i in `grep -onI https://www.tensorflow.org/code/\[a-zA-Z0-9/._-\]\* -r tensorflow`
do
  filename=`echo $i|awk -F: '{print $1}'`
  linenumber=`echo $i|awk -F: '{print $2}'`
  target=`echo $i|awk -F: '{print $4}'|tail -c +27`

  # skip files in tensorflow/models
  if [[ $target == tensorflow_models/* ]] ; then
    continue
  fi

  if [ ! -f $target ] && [ ! -d $target ]; then
    success=0
    echo Broken link $target at line $linenumber of file $filename
  fi
done

if [ $success == 0 ]; then
  echo Code link check fails.
  exit 1
fi

echo Code link check success.
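
For readers who prefer Python, a rough sketch of what this check does (not part of the commit; it assumes it is run from the repository root, like the script above, and uses os.path.exists rather than the script's separate file/directory tests):

import io
import os
import re

# Matches links of the form https://www.tensorflow.org/code/<path-in-repo>.
LINK_RE = re.compile(r"https://www\.tensorflow\.org/code/([a-zA-Z0-9/._-]+)")

broken = []
for root, _, files in os.walk("tensorflow"):
  for name in files:
    path = os.path.join(root, name)
    try:
      with io.open(path, "r", encoding="utf-8", errors="ignore") as f:
        for lineno, line in enumerate(f, start=1):
          for target in LINK_RE.findall(line):
            # The shell script skips links into tensorflow/models.
            if target.startswith("tensorflow_models/"):
              continue
            if not os.path.exists(target):
              broken.append((target, lineno, path))
    except (IOError, OSError):
      continue

for target, lineno, path in broken:
  print("Broken link %s at line %d of file %s" % (target, lineno, path))
if broken:
  raise SystemExit("Code link check fails.")
print("Code link check success.")
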
@ -16,7 +16,7 @@

set -ex

-GOLANG_URL="https://storage.googleapis.com/golang/go1.7.5.linux-amd64.tar.gz"
+GOLANG_URL="https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz"

sudo mkdir -p /usr/local
wget -q -O - "${GOLANG_URL}" | sudo tar -C /usr/local -xz
@ -60,7 +60,7 @@ reinstall_tensorflow_pip ${PIP_NAME}

# Define no_tensorflow_py_deps=true so that every py_test has no deps anymore,
# which will result testing system installed tensorflow
-# GPU tests are very flaky when running concurently, so set local_test_jobs=1
+# GPU tests are very flaky when running concurrently, so set local_test_jobs=1
bazel test -c opt --config=win-cuda $BUILD_OPTS -k --test_output=errors \
  --define=no_tensorflow_py_deps=true --test_lang_filters=py \
  --test_tag_filters=-no_pip,-no_windows,-no_windows_gpu \
@ -205,11 +205,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
  native.new_http_archive(
      name = "farmhash_archive",
      urls = [
-          "http://mirror.bazel.build/github.com/google/farmhash/archive/92e897b282426729f4724d91a637596c7e2fe28f.zip",
-          "https://github.com/google/farmhash/archive/92e897b282426729f4724d91a637596c7e2fe28f.zip",
+          "http://mirror.bazel.build/github.com/google/farmhash/archive/23eecfbe7e84ebf2e229bd02248f431c36e12f1a.zip",
+          "https://github.com/google/farmhash/archive/23eecfbe7e84ebf2e229bd02248f431c36e12f1a.zip",
      ],
-      sha256 = "4c626d1f306bda2c6804ab955892f803f5245f4dcaecb4979dc08b091256da54",
-      strip_prefix = "farmhash-92e897b282426729f4724d91a637596c7e2fe28f",
+      sha256 = "55215f8cd3ddbe9781f6fe5cc228731d6dcc8301b6191c6d420034c3fff1cb8d",
+      strip_prefix = "farmhash-23eecfbe7e84ebf2e229bd02248f431c36e12f1a",
      build_file = str(Label("//third_party:farmhash.BUILD")),
  )

@ -11,6 +11,9 @@ build:mkl --define=using_mkl=true
build:sycl --crosstool_top=@local_config_sycl//crosstool:toolchain
build:sycl --define=using_sycl=true

+build:sycl_nodouble --crosstool_top=@local_config_sycl//crosstool:toolchain
+build:sycl_nodouble --define=using_sycl=true --cxxopt -DTENSORFLOW_SYCL_NO_DOUBLE
+
build:sycl_asan --crosstool_top=@local_config_sycl//crosstool:toolchain
build:sycl_asan --define=using_sycl=true --copt -fno-omit-frame-pointer --copt -fsanitize-coverage=3 --copt -DGPR_NO_DIRECT_SYSCALLS --linkopt -fPIC --linkopt -fsanitize=address