Merge changes from GitHub, clean up linter errors, fix dependencies test.
Change: 114064632
This commit is contained in: parent d53f06b6a3 · commit 59f1eba5fb
Changed paths:
tensorflow/
  cc/ops
  contrib/layers/python/framework
  core/kernels:
    binary_linalg_ops_common.cc, constant_op_gpu.cu.cc, conv_ops_gpu.h,
    conv_ops_gpu_3.cu.cc, core_ops_test.cc, cwise_ops.h, fifo_queue.cc,
    initializable_lookup_table.h, lookup_table_op.cc, matrix_inverse_op.cc,
    matrix_solve_op.cc, ops_util.h, pad_op.cc, random_shuffle_queue_op.cc,
    range_sampler.cc, range_sampler.h, segment_reduction_ops.cc,
    sparse_split_op.cc, summary_op.cc, tensor_array_ops.cc, xent_op.h
  core/lib/core
  core/platform
  examples/
    android/src/org/tensorflow/demo
    tutorials/mnist
    udacity
  models/embedding
  python/
    framework
    kernel_tests:
      cwise_ops_test.py, learn_test.py, padding_fifo_queue_test.py,
      parsing_ops_test.py, py_func_test.py, rnn_test.py
    ops
    platform/default
    tools
    training
  tools/ci_build
  util/python
@@ -255,13 +255,24 @@ Returns a pointer to the created Node)comment");
 }

 // Converts:
-//   bazel-out/.../genfiles/XX
+//   bazel-out/.../genfiles/(external/YYY/)?XX
 // to: XX.
 string GetPath(const std::string& dot_h_fname) {
   auto pos = dot_h_fname.find("/genfiles/");
-  if (pos == string::npos) return dot_h_fname;
-  // - 1 account for the terminating null character (\0) in "/genfiles/".
-  return dot_h_fname.substr(pos + sizeof("/genfiles/") - 1);
+  string result = dot_h_fname;
+  if (pos != string::npos) {
+    // - 1 account for the terminating null character (\0) in "/genfiles/".
+    result = dot_h_fname.substr(pos + sizeof("/genfiles/") - 1);
+  }
+  if (result.size() > sizeof("external/") &&
+      result.compare(0, sizeof("external/") - 1, "external/") == 0) {
+    result = result.substr(sizeof("external/") - 1);
+    pos = result.find("/");
+    if (pos != string::npos) {
+      result = result.substr(pos + 1);
+    }
+  }
+  return result;
 }

 // Converts:
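Note that sizeof on a string literal counts the trailing '\0', hence the "- 1" in the offsets above. A rough Python sketch of the new GetPath behavior, with made-up example paths (not part of the commit):

    def get_path(dot_h_fname):
        # Strip everything up to and including "/genfiles/", if present.
        marker = "/genfiles/"
        pos = dot_h_fname.find(marker)
        result = dot_h_fname[pos + len(marker):] if pos != -1 else dot_h_fname
        # Strip a leading "external/<repo>/" prefix, if present.
        if result.startswith("external/"):
            rest = result[len("external/"):]
            slash = rest.find("/")
            result = rest[slash + 1:] if slash != -1 else rest
        return result

    assert get_path("bazel-out/k8/genfiles/foo/bar.h") == "foo/bar.h"
    assert get_path("bazel-out/k8/genfiles/external/repo/foo/bar.h") == "foo/bar.h"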
@@ -22,6 +22,7 @@ filegroup(
 py_library(
     name = "tensor_util",
     srcs = ["tensor_util.py"],
+    srcs_version = "PY2AND3",
     deps = [
         "//tensorflow/python:framework",
     ],
@@ -31,6 +32,7 @@ py_test(
     name = "tensor_util_test",
     size = "small",
     srcs = ["tensor_util_test.py"],
+    srcs_version = "PY2AND3",
     deps = [
         ":tensor_util",
         "//tensorflow:tensorflow_py",
@@ -38,7 +38,7 @@ void BinaryLinearAlgebraOpBase::Compute(OpKernelContext* context) {
   for (int dim = 0; dim < (in_rhs.dims() - 2); ++dim) {
     OP_REQUIRES(context, in_rhs.dim_size(dim) == in_lhs.dim_size(dim),
                 errors::InvalidArgument(
-                    "Dimension mistmatch: %d != %d for dimension %d",
+                    "Dimension mismatch: %d != %d for dimension %d",
                     in_lhs.dim_size(dim), in_rhs.dim_size(dim), dim));
   }
@@ -17,6 +17,7 @@ limitations under the License.

 #define EIGEN_USE_GPU

+#include "tensorflow/core/framework/register_types.h"
 #include "tensorflow/core/framework/tensor_types.h"
 #include "tensorflow/core/kernels/fill_functor.h"
 #include "tensorflow/core/platform/types.h"
@@ -78,13 +79,8 @@ struct FillFunctor<GPUDevice, T> {
 };

 #define DEFINE_FILL_GPU(T) template struct FillFunctor<GPUDevice, T>
-DEFINE_FILL_GPU(float);
-DEFINE_FILL_GPU(double);
-DEFINE_FILL_GPU(int32);
-DEFINE_FILL_GPU(uint8);
-DEFINE_FILL_GPU(int16);
-DEFINE_FILL_GPU(int8);
-DEFINE_FILL_GPU(int64);
+TF_CALL_REAL_NUMBER_TYPES(DEFINE_FILL_GPU);
 DEFINE_FILL_GPU(bool);
 #undef DEFINE_FILL_GPU

 // Partial specialization of FillFunctor<Device=GPUDevice, T>.
@@ -23,7 +23,7 @@ limitations under the License.

 namespace tensorflow {

-// TODO(zhengxq): move this to gpu_util.h. The use of such wrapers is wide
+// TODO(zhengxq): move this to gpu_util.h. The use of such wrappers is wide
 // spread.
 template <typename T>
 perftools::gputools::DeviceMemory<T> AsDeviceMemory(const T* cuda_memory,
@@ -101,7 +101,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int TensorIndexToFlat(
   return flat_index;
 }

-// A helper function that converts a flat arrary index into a tensor index.
+// A helper function that converts a flat array index into a tensor index.
 template <int IndexCount>
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index<IndexCount> FlatToTensorIndex(
     int index, const Dimension<IndexCount>& dims) {
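For intuition, the two helpers invert each other under row-major layout; a small Python sketch (illustrative only, not the TF code):

    def tensor_index_to_flat(index, dims):
        # Row-major: flat = ((i0 * d1 + i1) * d2 + i2) * ...
        flat = 0
        for i, d in zip(index, dims):
            flat = flat * d + i
        return flat

    def flat_to_tensor_index(flat, dims):
        # Peel off the fastest-varying (last) dimension first.
        index = []
        for d in reversed(dims):
            index.append(flat % d)
            flat //= d
        return list(reversed(index))

    assert tensor_index_to_flat([1, 2], [3, 4]) == 6
    assert flat_to_tensor_index(6, [3, 4]) == [1, 2]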
@@ -119,7 +119,7 @@ static void BM_ConvFloat(int iters, int batch, int rows, int cols, int in_depth,
   if (op == CONV_OP_FORWARD) {
     // Forward computation:
     // BATCH x OUT_ROW X OUT_COL X IN_DEPTH X PATCH_ROW X PATH_COL X OUT_DEPTH
-    // We multiply by two since there are mutliplications and additions.
+    // We multiply by two since there are multiplications and additions.
     num_ops = static_cast<int64>(batch * in_depth * out_depth) *
               static_cast<int64>(filter_rows * filter_cols) *
               static_cast<int64>(out_rows * out_cols) * 2;
@@ -127,7 +127,7 @@ static void BM_ConvFloat(int iters, int batch, int rows, int cols, int in_depth,
     // Backward computation: both input and filter backprop take the same
     // amount of computation:
     // BATCH x IN_ROW X IN_COL X IN_DEPTH X PATCH_ROW X PATCH_COL X OUT_DEPTH
-    // We multiply by two since there are mutliplications and additions.
+    // We multiply by two since there are multiplications and additions.
     num_ops = static_cast<int64>(batch * in_depth * out_depth) *
               static_cast<int64>(filter_rows * filter_cols) *
               static_cast<int64>(rows * cols) * 2;
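As a sanity check of the FLOP formula with made-up shapes (my numbers, not from the benchmark):

    # Each output element accumulates in_depth * filter_rows * filter_cols
    # multiply-add pairs; the trailing * 2 counts the multiply and the add.
    batch, in_depth, out_depth = 32, 3, 64
    filter_rows = filter_cols = 3
    out_rows = out_cols = 112
    num_ops = (batch * in_depth * out_depth) \
        * (filter_rows * filter_cols) \
        * (out_rows * out_cols) * 2
    print(num_ops)  # 1387266048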
@@ -302,7 +302,7 @@ struct use_bcast_optimization<double> {
 // sqrt(x) = x^(1/2)
 // rsqrt(x) = x^(-1/2)
 // exp(x) = e^x
-// log(x) = natural logrithm of x
+// log(x) = natural logarithm of x
 // tanh = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
 // sigmoid = 1 / (1 + exp(-x))  // a.k.a, logistic
 //
@@ -282,7 +282,7 @@ void FIFOQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
     for (; s > 0; --s) {
       if (attempt->tuple.empty()) {
         // Only allocate tuple when we have something to dequeue
-        // so we don't use exceessive memory when there are many
+        // so we don't use excessive memory when there are many
         // blocked dequeue attempts waiting.
         attempt->tuple.reserve(num_components());
         for (int i = 0; i < num_components(); ++i) {
@@ -86,7 +86,7 @@ class InitializableLookupTable : public LookupInterface {
   // Returns a tensor that contains the current batch of 'value' values.
   virtual const Tensor& values() const = 0;

-  // Returns an error if one has occurred, otherwire returns Status::OK.
+  // Returns an error if one has occurred, otherwise returns Status::OK.
   virtual Status status() const = 0;

   // Returns the total number of elements that the iterator will produce.
@@ -31,7 +31,7 @@ namespace lookup {
 // Lookup table that wraps an unordered_map, where the key and value data type
 // is specified.
 //
-// This table is recommened for any variations to key values.
+// This table is recommended for any variations to key values.
 //
 // For look up, the table is required to be initialized (allocated
 // and populated). Once the table is marked as initialized it becomes read-only.
@@ -77,10 +77,10 @@ class MatrixInverseOp
       }
     }
     Eigen::PartialPivLU<Matrix> lu_decomposition(input);
-    // While PartialPivLU cannot give strong guarantees on invertability,
+    // While PartialPivLU cannot give strong guarantees on invertibility,
     // we can at least guard against exact zero pivots. This can occur as
     // a result of basic user mistakes, such as providing integer valued
-    // matrices that are exacly singular, or due to underflow if this
+    // matrices that are exactly singular, or due to underflow if this
     // code is run with denormals being flushed to zero.
     // TODO(rmlarsen): Add check based on condition number estimation.
     const Scalar min_abs_pivot =
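A minimal sketch of the zero-pivot guard described in the comment above, using NumPy/SciPy in place of Eigen (the structure and threshold are mine, not the kernel's actual code):

    import numpy as np
    from scipy.linalg import lu_factor

    def assert_invertible(a):
        # P*A = L*U; the diagonal of the packed LU factor holds U's pivots.
        lu, piv = lu_factor(a)
        min_abs_pivot = np.abs(np.diag(lu)).min()
        if min_abs_pivot == 0.0:  # exact zero pivot => exactly singular input
            raise ValueError("Input is not invertible.")

    assert_invertible(np.array([[2.0, 1.0], [1.0, 3.0]]))    # OK
    # assert_invertible(np.array([[1.0, 2.0], [2.0, 4.0]]))  # raises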
@@ -81,10 +81,10 @@ class MatrixSolveOp
       return;
     }
     Eigen::PartialPivLU<Matrix> lu_decomposition(matrix);
-    // While PartialPivLU cannot give strong guarantees on invertability,
+    // While PartialPivLU cannot give strong guarantees on invertibility,
     // we can at least guard against exact zero pivots. This can occur as
     // a result of basic user mistakes such providing integer valued
-    // matrices that are exacly singular, or due to underflow if this
+    // matrices that are exactly singular, or due to underflow if this
     // code is run with denormals being flushed to zero.
     // TODO(rmlarsen): Add check based on condition number estimation.
     const Scalar min_abs_pivot =
@@ -120,7 +120,7 @@ bool IsInnerDimsSizeAligned(const TensorShape& s) {
 }

 // Returns in 'col_data', image patches in storage order (height, width, depth)
-// extracted from image at 'input_data', which is requred to be in storage
+// extracted from image at 'input_data', which is required to be in storage
 // order (batch, height, width, depth).
 // Implementation written by Yangqing Jia (jiayq).
 template <typename T>
@@ -72,7 +72,7 @@ class PadOp : public OpKernel {
     TTypes<int32>::ConstMatrix paddings = in1.matrix<int32>();
     for (int d = 0; d < fixed_dims; ++d) {
       const int32 before_d = paddings(d, 0);  // Pad before existing elements.
-      const int32 after_d = paddings(d, 1);  // Pad after exisitng elements.
+      const int32 after_d = paddings(d, 1);   // Pad after existing elements.
       OP_REQUIRES(context, before_d >= 0 && after_d >= 0,
                   errors::InvalidArgument("Paddings must be non-negative: ",
                                           before_d, " ", after_d));
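The paddings layout here matches tf.pad: row d of the matrix holds (pad_before, pad_after) for dimension d. A quick NumPy illustration (my example values):

    import numpy as np

    x = np.arange(6).reshape(2, 3)
    paddings = [(1, 0), (0, 2)]  # 1 row before dim 0; 2 columns after dim 1
    padded = np.pad(x, paddings, mode="constant")
    print(padded.shape)  # (3, 5)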
@@ -326,7 +326,7 @@ void RandomShuffleQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
     for (; s > 0; --s) {
       if (attempt->tuple.empty()) {
         // Only allocate tuple when we have something to dequeue
-        // so we don't use exceessive memory when there are many
+        // so we don't use excessive memory when there are many
         // blocked dequeue attempts waiting.
         attempt->tuple.reserve(num_components());
         for (int i = 0; i < num_components(); ++i) {
@@ -213,7 +213,7 @@ float UnigramSampler::Probability(int64 value) const {
   return unsafe_sampler_.Probability(value);
 }

-// Overriding at a high level results in far fewer lock aquisitions.
+// Overriding at a high level results in far fewer lock acquisitions.
 void UnigramSampler::SampleBatchGetExpectedCountAvoid(
     random::SimplePhilox* rnd, bool unique, MutableArraySlice<int64> batch,
     MutableArraySlice<float> batch_expected_count, ArraySlice<int64> extras,
@@ -188,7 +188,7 @@ class UnigramSampler : public RangeSampler {

   float Probability(int64 value) const override;

-  // Overriding at a high level results in far fewer lock aquisitions.
+  // Overriding at a high level results in far fewer lock acquisitions.
   void SampleBatchGetExpectedCountAvoid(
       random::SimplePhilox* rnd, bool unique,
       gtl::MutableArraySlice<int64> batch,
@@ -102,7 +102,7 @@ class SegmentReductionOp : public OpKernel {
                          Eigen::Unaligned> OutT;
       T* out_slice_ptr = &output_flat(segment_vec(start), 0);
       OutT out_slice(out_slice_ptr, out_slice_shape);
-      // We don't use out_slice.device(context->egien_device<Device>)
+      // We don't use out_slice.device(context->eigen_device<Device>)
       // because these pieces of work are likely to be very small and
       // the context switching overhead dwarfs any benefit we get from
       // using another thread to do this work.
@@ -37,7 +37,7 @@ class SparseSplitOp : public OpKernel {

     OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices.shape()),
                 errors::InvalidArgument(
-                    "Input indices should be a matrix but recived shape ",
+                    "Input indices should be a matrix but received shape ",
                     input_indices.shape().DebugString()));
     OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values.shape()),
                 errors::InvalidArgument(
@@ -119,7 +119,7 @@ TF_CALL_REAL_NUMBER_TYPES(REGISTER)
 struct HistogramResource : public ResourceBase {
   histogram::ThreadSafeHistogram histogram;

-  string DebugString() override { return "A historam summary. Stats ..."; }
+  string DebugString() override { return "A histogram summary. Stats ..."; }
 };

 class SummaryMergeOp : public OpKernel {
@@ -47,7 +47,7 @@ Status GetHandle(const string& input_name, OpKernelContext* ctx,
                  string* container, string* ta_handle) {
   {
     Tensor tensor;
-    // Assuming that input_name is at position 0 for puposes of
+    // Assuming that input_name is at position 0 for purposes of
     // has_input.
     TF_RETURN_IF_ERROR(ctx->mutable_input(input_name, &tensor, false));
     if (tensor.NumElements() != 2) {
@@ -92,7 +92,7 @@ struct XentEigenImpl {
     // sum(exp(logits - max_logits)) along classes.
     scratch.reshape(batch_only).device(d) = backprop.exp().sum(along_class);

-    // NOTE(keveman): Eigen on GPU dispatches to an optimized implementaion
+    // NOTE(keveman): Eigen on GPU dispatches to an optimized implementation
     // for an expression of the form lhs = rhs.sum().
     // lhs = -rhs.sum() doesn't match the above pattern, so folding in the
     // negation before calling sum().
@@ -15,6 +15,7 @@ limitations under the License.

 #include "tensorflow/core/lib/core/stringpiece.h"

+#include <algorithm>
 #include <iostream>
 #include "tensorflow/core/lib/hash/hash.h"

@@ -30,7 +31,7 @@ std::ostream& operator<<(std::ostream& o, StringPiece piece) {
 }

 bool StringPiece::contains(StringPiece s) const {
-  return memmem(data_, size_, s.data_, s.size_) != nullptr;
+  return std::search(begin(), end(), s.begin(), s.end()) != nullptr;
 }

 size_t StringPiece::find(char c, size_t pos) const {
@@ -23,10 +23,10 @@ limitations under the License.
 #elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \
     defined(PLATFORM_GOOGLE_ANDROID)
 extern "C" {
-#include "external/jpeg_archive/jpeg-9a/jerror.h"
-#include "external/jpeg_archive/jpeg-9a/jinclude.h"
-#include "external/jpeg_archive/jpeg-9a/jpeglib.h"
-#include "external/jpeg_archive/jpeg-9a/transupp.h"  // for rotations
+#include "jpeg-9a/jerror.h"
+#include "jpeg-9a/jinclude.h"
+#include "jpeg-9a/jpeglib.h"
+#include "jpeg-9a/transupp.h"  // for rotations
 }
 #else
 #error Define the appropriate PLATFORM_<foo> macro for this platform
@@ -22,7 +22,7 @@ limitations under the License.
 #include "tensorflow/core/platform/google/build_config/png.h"
 #elif defined(PLATFORM_POSIX) || defined(PLATFORM_POSIX_ANDROID) || \
     defined(PLATFORM_GOOGLE_ANDROID)
-#include "external/png_archive/libpng-1.2.53/png.h"
+#include "libpng-1.2.53/png.h"
 #else
 #error Define the appropriate PLATFORM_<foo> macro for this platform
 #endif
@@ -27,7 +27,7 @@ typedef ::StringPiece RegexpStringPiece;

 #else

-#include "external/re2/re2/re2.h"
+#include "re2/re2.h"
 namespace tensorflow {
 typedef re2::StringPiece RegexpStringPiece;
 }  // namespace tensorflow
@@ -16,6 +16,7 @@

 package org.tensorflow.demo;

+import android.Manifest;
 import android.app.Activity;
 import android.app.AlertDialog;
 import android.app.Dialog;
@@ -23,6 +24,7 @@ import android.app.DialogFragment;
 import android.app.Fragment;
 import android.content.Context;
 import android.content.DialogInterface;
+import android.content.pm.PackageManager;
 import android.content.res.Configuration;
 import android.graphics.ImageFormat;
 import android.graphics.Matrix;
@@ -38,9 +40,11 @@ import android.hardware.camera2.CaptureResult;
 import android.hardware.camera2.TotalCaptureResult;
 import android.hardware.camera2.params.StreamConfigurationMap;
 import android.media.ImageReader;
+import android.os.Build;
 import android.os.Bundle;
 import android.os.Handler;
 import android.os.HandlerThread;
+import android.os.Process;
 import android.util.Size;
 import android.util.SparseIntArray;
 import android.view.LayoutInflater;
@@ -69,6 +73,8 @@ public class CameraConnectionFragment extends Fragment {
    */
   private static final int MINIMUM_PREVIEW_SIZE = 320;

+  private static final int REQUEST_CAMERA_PERMISSION = 1;
+
   private RecognitionScoreView scoreView;

   /**
@@ -306,6 +312,41 @@ public class CameraConnectionFragment extends Fragment {
     super.onPause();
   }

+  private void requestCameraPermission() {
+    if (shouldShowRequestPermissionRationaleCompat(this, Manifest.permission.CAMERA)) {
+      new ConfirmationDialog().show(getChildFragmentManager(), FRAGMENT_DIALOG);
+    } else {
+      requestPermissionsCompat(
+          this, new String[] {Manifest.permission.CAMERA}, REQUEST_CAMERA_PERMISSION);
+    }
+  }
+
+  private boolean shouldShowRequestPermissionRationaleCompat(Fragment fragment, String permission) {
+    if (Build.VERSION.SDK_INT >= 23) {
+      return fragment.shouldShowRequestPermissionRationale(permission);
+    }
+    return false;
+  }
+
+  private void requestPermissionsCompat(Fragment fragment, String[] permissions, int requestCode) {
+    if (Build.VERSION.SDK_INT >= 23) {
+      fragment.requestPermissions(permissions, requestCode);
+    }
+  }
+
+  @Override
+  public void onRequestPermissionsResult(
+      int requestCode, String[] permissions, int[] grantResults) {
+    if (requestCode == REQUEST_CAMERA_PERMISSION) {
+      if (grantResults.length != 1 || grantResults[0] != PackageManager.PERMISSION_GRANTED) {
+        ErrorDialog.newInstance(getString(R.string.request_permission))
+            .show(getChildFragmentManager(), FRAGMENT_DIALOG);
+      }
+    } else {
+      super.onRequestPermissionsResult(requestCode, permissions, grantResults);
+    }
+  }
+
   /**
    * Sets up member variables related to camera.
    *
@@ -369,6 +410,11 @@ public class CameraConnectionFragment extends Fragment {
    * Opens the camera specified by {@link CameraConnectionFragment#cameraId}.
    */
   private void openCamera(final int width, final int height) {
+    if (checkSelfPermissionCompat(getActivity(), Manifest.permission.CAMERA)
+        != PackageManager.PERMISSION_GRANTED) {
+      requestCameraPermission();
+      return;
+    }
     setUpCameraOutputs(width, height);
     configureTransform(width, height);
     final Activity activity = getActivity();
@@ -385,6 +431,13 @@ public class CameraConnectionFragment extends Fragment {
     }
   }

+  private int checkSelfPermissionCompat(Context context, String permission) {
+    if (Build.VERSION.SDK_INT >= 23) {
+      return context.checkSelfPermission(permission);
+    }
+    return context.checkPermission(permission, Process.myPid(), Process.myUid());
+  }
+
   /**
    * Closes the current {@link CameraDevice}.
    */
@@ -580,6 +633,40 @@ public class CameraConnectionFragment extends Fragment {
     }
   }

+  /**
+   * Shows OK/Cancel confirmation dialog about camera permission.
+   */
+  private class ConfirmationDialog extends DialogFragment {
+
+    @Override
+    public Dialog onCreateDialog(Bundle savedInstanceState) {
+      final Fragment parent = getParentFragment();
+      return new AlertDialog.Builder(getActivity())
+          .setMessage(R.string.request_permission)
+          .setPositiveButton(
+              android.R.string.ok,
+              new DialogInterface.OnClickListener() {
+                @Override
+                public void onClick(DialogInterface dialog, int which) {
+                  requestPermissionsCompat(
+                      parent, new String[] {Manifest.permission.CAMERA}, REQUEST_CAMERA_PERMISSION);
+                }
+              })
+          .setNegativeButton(
+              android.R.string.cancel,
+              new DialogInterface.OnClickListener() {
+                @Override
+                public void onClick(DialogInterface dialog, int which) {
+                  Activity activity = parent.getActivity();
+                  if (activity != null) {
+                    activity.finish();
+                  }
+                }
+              })
+          .create();
+    }
+  }
+
   /**
    * Shows an error message dialog.
    */
@@ -21,7 +21,7 @@ http://tensorflow.org/tutorials/mnist/beginners/index.md
 See documentation on the TensorBoard specific pieces at
 http://tensorflow.org/how_tos/summaries_and_tensorboard/index.md

-If you modify this file, please update the exerpt in
+If you modify this file, please update the excerpt in
 how_tos/summaries_and_tensorboard/index.md.

 """
@@ -183,6 +183,7 @@
     },
     "source": [
       "num_classes = 10\n",
+      "np.random.seed(133)\n",
      "\n",
      "def maybe_extract(filename, force=False):\n",
      "  root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz\n",
@@ -539,6 +540,8 @@
       "  try:\n",
       "    with open(pickle_file, 'rb') as f:\n",
       "      letter_set = pickle.load(f)\n",
+      "      # let's shuffle the letters to have random validation and training set\n",
+      "      np.random.shuffle(letter_set)\n",
       "      if valid_dataset is not None:\n",
       "        valid_letter = letter_set[:vsize_per_class, :, :]\n",
       "        valid_dataset[start_v:end_v, :, :] = valid_letter\n",
@@ -607,14 +610,14 @@
       "cellView": "both"
     },
     "source": [
-      "np.random.seed(133)\n",
       "def randomize(dataset, labels):\n",
       "  permutation = np.random.permutation(labels.shape[0])\n",
       "  shuffled_dataset = dataset[permutation,:,:]\n",
       "  shuffled_labels = labels[permutation]\n",
       "  return shuffled_dataset, shuffled_labels\n",
       "train_dataset, train_labels = randomize(train_dataset, train_labels)\n",
-      "test_dataset, test_labels = randomize(test_dataset, test_labels)"
+      "test_dataset, test_labels = randomize(test_dataset, test_labels)\n",
+      "valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)"
     ],
     "outputs": [],
     "execution_count": 0
@@ -770,4 +773,4 @@
   ]
  }
 ]
-}
+}
@@ -462,4 +462,4 @@
   ]
  }
 ]
-}
+}
@@ -887,4 +887,4 @@
   ]
  }
 ]
-}
+}
@@ -509,7 +509,8 @@ def main(_):
     sys.exit(1)
   opts = Options()
   with tf.Graph().as_default(), tf.Session() as session:
-    model = Word2Vec(opts, session)
+    with tf.device("/cpu:0"):
+      model = Word2Vec(opts, session)
     for _ in xrange(opts.epochs_to_train):
       model.train()  # Process one epoch
       model.eval()  # Eval analogies.
@@ -414,7 +414,8 @@ def main(_):
     sys.exit(1)
   opts = Options()
   with tf.Graph().as_default(), tf.Session() as session:
-    model = Word2Vec(opts, session)
+    with tf.device("/cpu:0"):
+      model = Word2Vec(opts, session)
     for _ in xrange(opts.epochs_to_train):
       model.train()  # Process one epoch
       model.eval()  # Eval analogies.
@@ -21,6 +21,8 @@ from __future__ import print_function
 import inspect
 import re

+from six.moves import xrange  # pylint: disable=redefined-builtin
+
 from tensorflow.core.framework import attr_value_pb2
 from tensorflow.core.framework import function_pb2
 from tensorflow.core.framework import op_def_pb2
@@ -159,7 +161,7 @@ def _add_op_node(graph, op, func):
     inp_index += 1
   node.dep.extend([_make_argname_from_tensor_name(x.name)
                    for x in op.control_inputs])
-  for k, v in _get_node_def_attr(op).iteritems():
+  for k, v in _get_node_def_attr(op).items():
     node.attr[k].CopyFrom(v)
   func.node.extend([node])

@@ -322,7 +324,7 @@ def define_function(func, input_types):
   if inspect.isfunction(func):
     func_name = func.__name__
   elif inspect.ismethod(func):
-    func_name = func.im_self.__name__ + "." + func.__name__
+    func_name = func.__self__.__name__ + "." + func.__name__
   else:
     raise ValueError("Argument must be a function")
   argspec = inspect.getargspec(func)
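The changes above are routine Python 2→3 portability fixes; a brief illustration of each (my own snippets, not from the commit):

    from six.moves import xrange  # maps to xrange on Py2, range on Py3

    d = {"a": 1}
    for k, v in d.items():  # dict.iteritems() existed only in Python 2
        print(k, v)

    class C(object):
        def m(self):
            pass

    m = C().m
    assert m.__self__ is not None  # method.im_self was the Python 2 spelling

    for i in xrange(3):
        print(i)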
@@ -24,6 +24,7 @@ import tensorflow.python.platform

 import time
 import numpy as np
+from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf

 from tensorflow.python.framework import function
@@ -107,7 +107,7 @@ class UnaryOpTest(tf.test.TestCase):
       try:
         return fn(x)
       except ValueError as e:
-        if "domain error" in e.message:
+        if "domain error" in str(e):
           return np.inf * np.ones_like(x)
         else:
           raise e
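The e.message attribute was a Python 2-only convenience (gone in Python 3); str(e) is portable:

    try:
        raise ValueError("math domain error")
    except ValueError as e:
        assert "domain error" in str(e)  # works on both Python 2 and 3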
@@ -32,7 +32,14 @@ def assert_summary_scope(regexp):
   for summary in tf.get_collection(tf.GraphKeys.SUMMARIES):
     tag = tf.unsupported.constant_value(summary.op.inputs[0])
     assert tag is not None, 'All summaries must have constant tags'
+
+    tag = str(tag)
+    # Tags in Python 3 erroneously acquire the 'b\'' prefix and '\'' suffix
+    # This is a temporary measure meant to expediently overcome test failure.
+    # TODO(danmane): Get rid of the extraneous prefix and suffix.
+    if tag.startswith('b\'') and tag.endswith('\''):
+      tag = tag[2:-1]
+
     assert isinstance(tag[0], six.string_types), tag[0]
     assert re.match(regexp, tag), "tag doesn't match %s: %s" % (regexp, tag)

@@ -59,8 +66,9 @@ class FullyConnectedTest(tf.test.TestCase):
     self.assertTrue(np.all(out_value >= 0),
                     'Relu should have all values >= 0.')

-    self.assertGreater(tf.get_collection(tf.GraphKeys.SUMMARIES), 0,
-                       'Some summaries should have been added.')
+    self.assertGreater(
+        len(tf.get_collection(tf.GraphKeys.SUMMARIES)), 0,
+        'Some summaries should have been added.')
     self.assertEqual(2,
                      len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
     self.assertEqual(0,
@@ -81,8 +89,9 @@ class FullyConnectedTest(tf.test.TestCase):
     self.assertTrue(np.all(out_value >= 0),
                     'Relu should have all values >= 0.')

-    self.assertGreater(tf.get_collection(tf.GraphKeys.SUMMARIES), 0,
-                       'Some summaries should have been added.')
+    self.assertGreater(
+        len(tf.get_collection(tf.GraphKeys.SUMMARIES)), 0,
+        'Some summaries should have been added.')
     self.assertEqual(2,
                      len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
     self.assertEqual(0,
@@ -105,8 +114,9 @@ class FullyConnectedTest(tf.test.TestCase):
     self.assertTrue(np.all(out_value <= 6),
                     'Relu6 should have all values <= 6.')

-    self.assertGreater(tf.get_collection(tf.GraphKeys.SUMMARIES), 0,
-                       'Some summaries should have been added.')
+    self.assertGreater(
+        len(tf.get_collection(tf.GraphKeys.SUMMARIES)), 0,
+        'Some summaries should have been added.')
     self.assertEqual(2,
                      len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
     self.assertEqual(0,
@@ -167,8 +177,8 @@ class FullyConnectedTest(tf.test.TestCase):
                              weight_collections=['unbiased'],
                              bias_collections=['biased'])

-    self.assertEquals(1, len(tf.get_collection('unbiased')))
-    self.assertEquals(1, len(tf.get_collection('biased')))
+    self.assertEqual(1, len(tf.get_collection('unbiased')))
+    self.assertEqual(1, len(tf.get_collection('biased')))

   def test_all_custom_collections(self):
     tf.learn.fully_connected(self.input,
@@ -177,18 +187,19 @@ class FullyConnectedTest(tf.test.TestCase):
                              weight_collections=['unbiased', 'all'],
                              bias_collections=['biased', 'all'])

-    self.assertEquals(1, len(tf.get_collection('unbiased')))
-    self.assertEquals(1, len(tf.get_collection('biased')))
-    self.assertEquals(2, len(tf.get_collection('all')))
-    self.assertEquals(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES),
-                      tf.get_collection('all'))
+    self.assertEqual(1, len(tf.get_collection('unbiased')))
+    self.assertEqual(1, len(tf.get_collection('biased')))
+    self.assertEqual(2, len(tf.get_collection('all')))
+    self.assertEqual(
+        tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES),
+        tf.get_collection('all'))

   def test_no_summaries(self):
     tf.learn.fully_connected(self.input,
                              2,
                              activation_fn=tf.nn.relu,
                              create_summaries=False)
-    self.assertEquals([], tf.get_collection(tf.GraphKeys.SUMMARIES))
+    self.assertEqual([], tf.get_collection(tf.GraphKeys.SUMMARIES))

   # Verify fix of a bug where no_summaries + activation_fn=None led to a
   # NoneType exception.
@@ -197,7 +208,7 @@ class FullyConnectedTest(tf.test.TestCase):
                              2,
                              activation_fn=None,
                              create_summaries=False)
-    self.assertEquals([], tf.get_collection(tf.GraphKeys.SUMMARIES))
+    self.assertEqual([], tf.get_collection(tf.GraphKeys.SUMMARIES))

   def test_regularizer(self):
     cnt = [0]
@@ -264,8 +275,9 @@ class Convolution2dTest(tf.test.TestCase):
     self.assertTrue(np.all(out_value >= 0),
                     'Relu should have capped all values.')

-    self.assertGreater(tf.get_collection(tf.GraphKeys.SUMMARIES), 0,
-                       'Some summaries should have been added.')
+    self.assertGreater(
+        len(tf.get_collection(tf.GraphKeys.SUMMARIES)), 0,
+        'Some summaries should have been added.')
     self.assertEqual(2,
                      len(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)))
     self.assertEqual(0,
@@ -330,8 +342,8 @@ class Convolution2dTest(tf.test.TestCase):
                            weight_collections=['unbiased'],
                            bias_collections=['biased'])

-    self.assertEquals(1, len(tf.get_collection('unbiased')))
-    self.assertEquals(1, len(tf.get_collection('biased')))
+    self.assertEqual(1, len(tf.get_collection('unbiased')))
+    self.assertEqual(1, len(tf.get_collection('biased')))

   def test_all_custom_collections(self):
     tf.learn.convolution2d(self.input,
@@ -340,18 +352,19 @@ class Convolution2dTest(tf.test.TestCase):
                            weight_collections=['unbiased', 'all'],
                            bias_collections=['biased', 'all'])

-    self.assertEquals(1, len(tf.get_collection('unbiased')))
-    self.assertEquals(1, len(tf.get_collection('biased')))
-    self.assertEquals(2, len(tf.get_collection('all')))
-    self.assertEquals(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES),
-                      tf.get_collection('all'))
+    self.assertEqual(1, len(tf.get_collection('unbiased')))
+    self.assertEqual(1, len(tf.get_collection('biased')))
+    self.assertEqual(2, len(tf.get_collection('all')))
+    self.assertEqual(
+        tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES),
+        tf.get_collection('all'))

   def test_no_summaries(self):
     tf.learn.convolution2d(self.input,
                            2, (3, 3),
                            activation_fn=tf.nn.relu,
                            create_summaries=False)
-    self.assertEquals([], tf.get_collection(tf.GraphKeys.SUMMARIES))
+    self.assertEqual([], tf.get_collection(tf.GraphKeys.SUMMARIES))

   def test_regularizer(self):
     cnt = [0]
@@ -429,12 +429,8 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
       string_val, int_val = sess.run(dequeued_t)

       self.assertAllEqual(
-          [["a", "", ""],
-           ["ab", "", ""],
-           ["abc", "", ""],
-           ["abc", "d", ""],
-           ["abc", "d", "e"]],
-          string_val)
+          [[b"a", b"", b""], [b"ab", b"", b""], [b"abc", b"", b""],
+           [b"abc", b"d", b""], [b"abc", b"d", b"e"]], string_val)
       self.assertAllEqual(
           [[[1, 0, 0]],
            [[2, 0, 0]],
@@ -450,7 +446,7 @@ class PaddingFIFOQueueTest(tf.test.TestCase):
               dequeued_t[1].get_shape()))

       string_val, int_val = sess.run(dequeued_single_t)
-      self.assertAllEqual(["abc", "d", "e", "f"], string_val)
+      self.assertAllEqual([b"abc", b"d", b"e", b"f"], string_val)
       self.assertAllEqual([[1, 2, 3, 4]], int_val)
       self.assertTrue(
           tf.TensorShape(string_val.shape).is_compatible_with(
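Under Python 3, fetched string tensors come back as bytes, so the expected values switch to b"..." literals (which are plain str on Python 2). For example:

    value = "abc".encode("utf-8")  # what a fetched string tensor looks like on Py3
    assert value == b"abc"
    assert b"abc" != "abc"         # bytes and str no longer compare equal on Py3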
@@ -98,7 +98,7 @@ class ParseExampleTest(tf.test.TestCase):
       batch_size = (
           serialized.eval().size if isinstance(serialized, tf.Tensor)
           else np.asarray(serialized).size)
-      for k, f in kwargs["features"].iteritems():
+      for k, f in kwargs["features"].items():
         if isinstance(f, tf.FixedLenFeature) and f.shape is not None:
           self.assertEqual(
               tuple(out[k].get_shape().as_list()), (batch_size,) + f.shape)
@@ -367,7 +367,7 @@ class ParseSingleExampleTest(tf.test.TestCase):
       _compare_output_to_expected(self, out, expected_values, tf_result)

       # Check shapes.
-      for k, f in kwargs["features"].iteritems():
+      for k, f in kwargs["features"].items():
         if isinstance(f, tf.FixedLenFeature) and f.shape is not None:
           self.assertEqual(tuple(out[k].get_shape()), f.shape)
         elif isinstance(f, tf.VarLenFeature):
@@ -455,7 +455,7 @@ class ParseSequenceExampleTest(tf.test.TestCase):
       # Check shapes; if serialized is a Tensor we need its size to
       # properly check.
       if "context_features" in kwargs:
-        for k, f in kwargs["context_features"].iteritems():
+        for k, f in kwargs["context_features"].items():
           if isinstance(f, tf.FixedLenFeature) and f.shape is not None:
             self.assertEqual(
                 tuple(context_out[k].get_shape().as_list()), f.shape)
@@ -17,9 +17,12 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

+# pylint: disable=g-bad-import-order,unused-import
 import tensorflow.python.platform
+# pylint: enable=g-bad-import-order,unused-import

 import numpy as np
+from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf

 from tensorflow.python.framework import errors
@@ -100,7 +103,7 @@ class PyOpTest(tf.test.TestCase):
     self.assertAllClose(x.eval(), 42.0)

   def testCleanup(self):
-    for _ in range(1000):
+    for _ in xrange(1000):
       g = tf.Graph()
       with g.as_default():
         c = tf.constant([1.], tf.float32)
@@ -25,6 +25,8 @@ import tensorflow.python.platform
 import numpy as np
 import tensorflow as tf

+from six.moves import xrange  # pylint: disable=redefined-builtin
+

 class Plus1RNNCell(tf.nn.rnn_cell.RNNCell):
   """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@@ -581,7 +583,7 @@ class BidirectionalRNNTest(tf.test.TestCase):
     self.assertEqual(len(outputs), len(inputs))
     for out in outputs:
       self.assertEqual(out.get_shape().as_list(), [batch_size if use_shape
-                                                  else None, 2 * num_units])
+                                                   else None, 2 * num_units])

     input_value = np.random.randn(batch_size, input_size)
@@ -26,7 +26,6 @@ from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import constant_op
 from tensorflow.python.ops import data_flow_ops
 from tensorflow.python.ops import gen_array_ops
-from tensorflow.python.ops import gen_math_ops
 from tensorflow.python.ops import math_ops


@@ -487,8 +486,8 @@ def _SparseMatMulGrad(op, grad):


 @ops.RegisterGradient("Floor")
-def _FloorGrad(_, grad):
-  return grad
+def _FloorGrad(_, unused_grad):
+  return [None]


 @ops.RegisterGradient("BatchMatMul")
@@ -359,7 +359,7 @@ def _TopKShape(op):
   else:
     k = op.get_attr("k")
   last = input_shape[-1].value
-  if last is not None and last < k:
+  if last is not None and k is not None and last < k:
     raise ValueError("input.shape %s must have last dimension >= k = %d" %
                      (input_shape, k))
   output_shape = input_shape[:-1].concatenate([k])
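The extra "k is not None" guard matters because ordering comparisons with None changed between Python versions:

    last, k = 10, None
    # Python 2 silently evaluates None comparisons (10 < None is False);
    # Python 3 raises TypeError, so the shape check guards against None first.
    if last is not None and k is not None and last < k:
        raise ValueError("last dimension must be >= k")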
@@ -21,6 +21,7 @@ from __future__ import print_function
 import inspect
+import itertools
 import os
 import sys
 import tempfile

 # pylint: disable=wildcard-import
@@ -56,6 +56,7 @@ py_binary(
         "graph_metrics.py",
     ],
     main = "graph_metrics.py",
+    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow:tensorflow_py",
    ],
@@ -27,9 +27,7 @@ from tensorflow.python.training import training_ops
 class AdagradOptimizer(optimizer.Optimizer):
   """Optimizer that implements the Adagrad algorithm.

-  (http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
-
-  See http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.
+  See this [paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).

   @@__init__
   """
@@ -19,7 +19,6 @@ from __future__ import division
 from __future__ import print_function

 from tensorflow.python.framework import ops
-from tensorflow.python.ops import constant_op
 from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import state_ops
@@ -31,9 +30,7 @@ from tensorflow.python.training import training_ops
 class AdamOptimizer(optimizer.Optimizer):
   """Optimizer that implements the Adam algorithm.

-  (http://arxiv.org/pdf/1412.6980v7.pdf).
-
-  See http://arxiv.org/pdf/1412.6980v7.pdf.
+  See this [paper](http://arxiv.org/pdf/1412.6980v7.pdf).

   @@__init__
   """
@@ -41,10 +41,9 @@ from tensorflow.python.training import training_ops
 class RMSPropOptimizer(optimizer.Optimizer):
   """Optimizer that implements the RMSProp algorithm.

-  See the [paper]
-  (http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).
+  See http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf.

   @@__init__
   """
@@ -33,4 +33,8 @@ fi

 ./configure

+# Gather and print build information
+SCRIPT_DIR=$( cd ${0%/*} && pwd -P )
+${SCRIPT_DIR}/print_build_info.sh ${CONTAINER_TYPE} ${COMMAND[@]}
+
 ${COMMAND[@]}
@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2015 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-set -e
-
-bazel build -c opt --config=cuda //tensorflow/tools/pip_package:build_pip_package
-rm -rf $HOME/.cache/tensorflow-pip
-bazel-bin/tensorflow/tools/pip_package/build_pip_package $HOME/.cache/tensorflow-pip
tensorflow/tools/ci_build/builds/pip.sh (new executable file, 292 lines)
@@ -0,0 +1,292 @@
#!/usr/bin/env bash
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# Build the Python PIP installation package for TensorFlow
# and run the Python unit tests from the source code on the installation
#
# Usage:
#   pip.sh CONTAINER_TYPE [--pip-upgrade]
#   The option "--pip-upgrade" forces "--upgrade" flag during pip install.
#
# When executing the Python unit tests, the script obeys the shell
# variables: PY_TEST_WHITELIST, PY_TEST_BLACKLIST, PY_TEST_GPU_BLACKLIST,
# and NO_TEST_ON_INSTALL
#
# To select only a subset of the Python tests to run, set the environment
# variable PY_TEST_WHITELIST, e.g.,
#   PY_TEST_WHITELIST="tensorflow/python/kernel_tests/shape_ops_test.py"
# Separate the tests with a colon (:). Leave this environment variable empty
# to disable the whitelist.
#
# You can also ignore a set of the tests by using the environment variable
# PY_TEST_BLACKLIST. For example, you can include in PY_TEST_BLACKLIST the
# tests that depend on Python modules in TensorFlow source that are not
# exported publicly.
#
# In addition, you can put a blacklist for only the GPU build in the
# environment variable PY_TEST_GPU_BLACKLIST.
#
# If the environmental variable NO_TEST_ON_INSTALL is set to any non-empty
# value, the script will exit after the pip install step.

# =============================================================================
# Test blacklist: General
#
# tensorflow/python/framework/ops_test.py
#   depends on "test_ops", which is defined in a C++ file wrapped as
#   a .py file through the Bazel rule "tf_gen_ops_wrapper_py".
# tensorflow/util/protobuf/compare_test.py:
#   depends on compare_test_pb2 defined outside Python
# tensorflow/python/framework/device_test.py:
#   depends on CheckValid() and ToString(), both defined externally
#
PY_TEST_BLACKLIST="${PY_TEST_BLACKLIST}:"\
"tensorflow/python/framework/ops_test.py:"\
"tensorflow/python/util/protobuf/compare_test.py:"\
"tensorflow/python/framework/device_test.py"

# Test blacklist: GPU-only
PY_TEST_GPU_BLACKLIST="${PY_TEST_GPU_BLACKLIST}:"\
"tensorflow/python/framework/function_test.py"

# =============================================================================

# Helper functions
# Get the absolute path from a path
abs_path() {
  [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"
}

# Get the command line arguments
CONTAINER_TYPE=$( echo "$1" | tr '[:upper:]' '[:lower:]' )

PIP_BUILD_TARGET="//tensorflow/tools/pip_package:build_pip_package"
if [[ ${CONTAINER_TYPE} == "cpu" ]]; then
  bazel build -c opt ${PIP_BUILD_TARGET}
elif [[ ${CONTAINER_TYPE} == "gpu" ]]; then
  bazel build -c opt --config=cuda ${PIP_BUILD_TARGET}
else
  echo "Unrecognized container type: \"${CONTAINER_TYPE}\""
  exit 1
fi

echo "PY_TEST_WHITELIST: ${PY_TEST_WHITELIST}"
echo "PY_TEST_BLACKLIST: ${PY_TEST_BLACKLIST}"
echo "PY_TEST_GPU_BLACKLIST: ${PY_TEST_GPU_BLACKLIST}"

# Append GPU-only test blacklist
if [[ ${CONTAINER_TYPE} == "gpu" ]]; then
  PY_TEST_BLACKLIST="${PY_TEST_BLACKLIST}:${PY_TEST_GPU_BLACKLIST}"
fi

# Build PIP Wheel file
PIP_WHL_DIR="pip_test/whl"
PIP_WHL_DIR=`abs_path ${PIP_WHL_DIR}`  # Get absolute path
rm -rf ${PIP_WHL_DIR} && mkdir -p ${PIP_WHL_DIR}
bazel-bin/tensorflow/tools/pip_package/build_pip_package ${PIP_WHL_DIR} &&

# Perform installation
WHL_PATH=`ls ${PIP_WHL_DIR}/tensorflow*.whl`
if [[ `echo ${WHL_PATH} | wc -w` -ne 1 ]]; then
  echo "ERROR: Failed to find exactly one built TensorFlow .whl file in "\
"directory: ${PIP_WHL_DIR}"
  exit 1
fi

echo "whl file path = ${WHL_PATH}"

# Install, in user's local home folder
echo "Installing pip whl file: ${WHL_PATH}"

UPGRADE_OPT=""
if [[ $2 == "--pip-upgrade" ]]; then
  UPGRADE_OPT="--upgrade"
fi

pip install -v --user ${UPGRADE_OPT} ${WHL_PATH} &&

# If NO_TEST_ON_INSTALL is set to any non-empty value, skip all Python
# tests-on-install and exit right away
if [[ ! -z ${NO_TEST_ON_INSTALL} ]]; then
  echo "NO_TEST_ON_INSTALL=${NO_TEST_ON_INSTALL}:"
  echo "  Skipping ALL Python unit tests on install"
  exit 0
fi

# Directory from which the unit-test files will be run
PY_TEST_DIR_REL="pip_test/tests"
PY_TEST_DIR=`abs_path ${PY_TEST_DIR_REL}`  # Get absolute path
rm -rf ${PY_TEST_DIR} && mkdir -p ${PY_TEST_DIR}

# Create test log directory
PY_TEST_LOG_DIR_REL=${PY_TEST_DIR_REL}/logs
PY_TEST_LOG_DIR=`abs_path ${PY_TEST_LOG_DIR_REL}`  # Absolute path

mkdir ${PY_TEST_LOG_DIR}

# Copy source files that are required by the tests but are not included in the
# PIP package

# Look for local Python library directory
LIB_PYTHON_DIR=""

# Candidate locations of the local Python library directory
LIB_PYTHON_DIR_CANDS="${HOME}/.local/lib/python* "\
"${HOME}/Library/Python/*/lib/python"

for CAND in ${LIB_PYTHON_DIR_CANDS}; do
  if [[ -d "${CAND}" ]]; then
    LIB_PYTHON_DIR="${CAND}"
    break
  fi
done

if [[ -z ${LIB_PYTHON_DIR} ]]; then
  echo "Failed to find local Python library directory"
  exit 1
else
  echo "Found local Python library directory at: ${LIB_PYTHON_DIR}"
fi

PACKAGES_DIR=`ls -d ${LIB_PYTHON_DIR}/*-packages | head -1`

echo "Copying some source directories that are required by tests but are "\
"not included in install to Python packages directory: ${PACKAGES_DIR}"

# tensorflow.python.tools
rm -rf ${PACKAGES_DIR}/tensorflow/python/tools
cp -r tensorflow/python/tools \
  ${PACKAGES_DIR}/tensorflow/python/tools
touch ${PACKAGES_DIR}/tensorflow/python/tools/__init__.py  # Make module visible

echo "Copying additional files required by tests to working directory "\
"for test: ${PY_TEST_DIR}"

# Image files required by some tests, e.g., images_ops_test.py
mkdir -p ${PY_TEST_DIR}/tensorflow/core/lib
rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/jpeg
cp -r tensorflow/core/lib/jpeg ${PY_TEST_DIR}/tensorflow/core/lib
rm -rf ${PY_TEST_DIR}/tensorflow/core/lib/png
cp -r tensorflow/core/lib/png ${PY_TEST_DIR}/tensorflow/core/lib

# Run tests
DIR0=`pwd`
ALL_PY_TESTS=`find tensorflow/python -name "*_test.py"`
PY_TEST_COUNT=`echo ${ALL_PY_TESTS} | wc -w`

if [[ ${PY_TEST_COUNT} -eq 0 ]]; then
  echo "ERROR: Cannot find any tensorflow Python unit tests to run on install"
  exit 1
fi

# Iterate through all the Python unit test files using the installation
COUNTER=0
PASS_COUNTER=0
FAIL_COUNTER=0
SKIP_COUNTER=0
FAILED_TESTS=""
FAILED_TEST_LOGS=""

for TEST_FILE_PATH in ${ALL_PY_TESTS}; do
  ((COUNTER++))

  PROG_STR="(${COUNTER} / ${PY_TEST_COUNT})"

  # If PY_TEST_WHITELIST is not empty, only the white-listed tests will be run
  if [[ ! -z ${PY_TEST_WHITELIST} ]] && \
     [[ ! ${PY_TEST_WHITELIST} == *"${TEST_FILE_PATH}"* ]]; then
    ((SKIP_COUNTER++))
    echo "${PROG_STR} Non-whitelisted test SKIPPED: ${TEST_FILE_PATH}"
    continue
  fi

  # If the test is in the black list, skip it
  if [[ ${PY_TEST_BLACKLIST} == *"${TEST_FILE_PATH}"* ]]; then
    ((SKIP_COUNTER++))
    echo "${PROG_STR} Blacklisted test SKIPPED: ${TEST_FILE_PATH}"
    continue
  fi

  # Copy to a separate directory to guard against the possibility of picking up
  # modules in the source directory
  cp ${TEST_FILE_PATH} ${PY_TEST_DIR}/

  TEST_BASENAME=`basename "${TEST_FILE_PATH}"`

  # Relative path of the test log. Use long path in case there are duplicate
  # file names in the Python tests
  TEST_LOG_REL="${PY_TEST_LOG_DIR_REL}/${TEST_FILE_PATH}.log"
  mkdir -p `dirname ${TEST_LOG_REL}`  # Create directory for log

  TEST_LOG=`abs_path ${TEST_LOG_REL}`  # Absolute path

  # Before running the test, cd away from the Tensorflow source to
  # avoid the possibility of picking up dependencies from the
  # source directory
  cd ${PY_TEST_DIR}
  python ${PY_TEST_DIR}/${TEST_BASENAME} >${TEST_LOG} 2>&1

  # Check for pass or failure status of the test output and exit
  if [[ $? -eq 0 ]]; then
    ((PASS_COUNTER++))

    echo "${PROG_STR} Python test-on-install PASSED: ${TEST_FILE_PATH}"
  else
    ((FAIL_COUNTER++))

    FAILED_TESTS="${FAILED_TESTS} ${TEST_FILE_PATH}"

    FAILED_TEST_LOGS="${FAILED_TEST_LOGS} ${TEST_LOG_REL}"

    echo "${PROG_STR} Python test-on-install FAILED: ${TEST_FILE_PATH}"
    echo "  Log @: ${TEST_LOG_REL}"
    echo "============== BEGINS failure log content =============="
    cat ${TEST_LOG}
    echo "============== ENDS failure log content =============="
    echo ""
  fi
  cd ${DIR0}

  # Clean up files for this test
  rm -f ${PY_TEST_DIR}/${TEST_BASENAME}

done

echo ""
echo "${PY_TEST_COUNT} Python test(s):" \
     "${PASS_COUNTER} passed;" \
     "${FAIL_COUNTER} failed; " \
     "${SKIP_COUNTER} skipped"
echo "Test logs directory: ${PY_TEST_LOG_DIR_REL}"

if [[ ${FAIL_COUNTER} -eq 0 ]]; then
  echo ""
  echo "Python test-on-install SUCCEEDED"

  exit 0
else
  echo "FAILED test(s):"
  FAILED_TEST_LOGS=($FAILED_TEST_LOGS)
  FAIL_COUNTER=0
  for TEST_NAME in ${FAILED_TESTS}; do
    echo "  ${TEST_NAME} (Log @: ${FAILED_TEST_LOGS[${FAIL_COUNTER}]})"
    ((FAIL_COUNTER++))
  done

  echo ""
  echo "Python test-on-install FAILED"
  exit 1
fi
tensorflow/tools/ci_build/builds/print_build_info.sh (new executable file, 94 lines)
@@ -0,0 +1,94 @@
#!/usr/bin/env bash
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

# Print build info, including info related to the machine, OS, build tools
# and TensorFlow source code. This can be used by build tools such as Jenkins.
# All info is printed on a single line, in JSON format, to work around the
# limitation of Jenkins Description Setter Plugin that multi-line regex is
# not supported.
#
# Usage:
#   print_build_info.sh (CONTAINER_TYPE) (COMMAND)
#     e.g.,
#       print_build_info.sh GPU bazel test -c opt --config=cuda //tensorflow/...

# Information about the command
CONTAINER_TYPE=$1
shift 1
COMMAND=("$@")

# Information about machine and OS
OS=`uname`
KERNEL=`uname -r`

ARCH=`uname -p`
PROCESSOR=`grep "model name" /proc/cpuinfo | head -1 | awk '{print substr($0, index($0, $4))}'`
PROCESSOR_COUNT=`grep "model name" /proc/cpuinfo | wc -l`

MEM_TOTAL=`grep MemTotal /proc/meminfo | awk '{print $2, $3}'`
SWAP_TOTAL=`grep SwapTotal /proc/meminfo | awk '{print $2, $3}'`

# Information about build tools
BAZEL_VER=`bazel version | head -1`
JAVA_VER=`javac -version 2>&1 | awk '{print $2}'`
PYTHON_VER=`python -V 2>&1 | awk '{print $2}'`
GPP_VER=`g++ --version | head -1`
SWIG_VER=`swig -version | grep -m 1 . | awk '{print $3}'`

# Information about TensorFlow source
TF_FETCH_URL=`git remote show origin | grep "Fetch URL:" | awk '{print $3}'`
TF_HEAD=`git rev-parse HEAD`

# NVIDIA & CUDA info
NVIDIA_DRIVER_VER=""
if [[ -f /proc/driver/nvidia/version ]]; then
  NVIDIA_DRIVER_VER=`head -1 /proc/driver/nvidia/version | awk '{print $(NF-6)}'`
fi

CUDA_DEVICE_COUNT="0"
CUDA_DEVICE_NAMES=""
if [[ ! -z `which nvidia-debugdump` ]]; then
  CUDA_DEVICE_COUNT=`nvidia-debugdump -l | grep "^Found [0-9]*.*device.*" | awk '{print $2}'`
  CUDA_DEVICE_NAMES=`nvidia-debugdump -l | grep "Device name:.*" | awk '{print substr($0, index($0, $3)) ","}'`
fi

CUDA_TOOLKIT_VER=""
if [[ ! -z 'which nvcc' ]]; then
  CUDA_TOOLKIT_VER=`nvcc -V | grep release | awk '{print $(NF)}'`
fi

# Print info
echo "TF_BUILD_INFO = {"\
"container_type: \"${CONTAINER_TYPE}\", "\
"command: \"${COMMAND[@]}\", "\
"source_HEAD: \"${TF_HEAD}\", "\
"source_remote_origin: \"${TF_FETCH_URL}\", "\
"OS: \"${OS}\", "\
"kernel: \"${KERNEL}\", "\
"architecture: \"${ARCH}\", "\
"processor: \"${PROCESSOR}\", "\
"processor_count: \"${PROCESSOR_COUNT}\", "\
"memory_total: \"${MEM_TOTAL}\", "\
"swap_total: \"${SWAP_TOTAL}\", "\
"Bazel_version: \"${BAZEL_VER}\", "\
"Java_version: \"${JAVA_VER}\", "\
"Python_version: \"${PYTHON_VER}\", "\
"gpp_version: \"${GPP_VER}\", "\
"NVIDIA_driver_version: \"${NVIDIA_DRIVER_VER}\", "\
"CUDA_device_count: \"${CUDA_DEVICE_COUNT}\", "\
"CUDA_device_names: \"${CUDA_DEVICE_NAMES}\", "\
"CUDA_toolkit_version: \"${CUDA_TOOLKIT_VER}\""\
"}"
@ -35,7 +35,9 @@ fi
|
||||
|
||||
# Optional arguments - environment variables. For example:
|
||||
# CI_DOCKER_EXTRA_PARAMS='-it --rm' CI_COMMAND_PREFIX='' tensorflow/tools/ci_build/ci_build.sh CPU /bin/bash
|
||||
CI_DOCKER_EXTRA_PARAMS=("${CI_DOCKER_EXTRA_PARAMS[@]:---rm}")
|
||||
if [[ "${CI_DOCKER_EXTRA_PARAMS}" != *"--rm"* ]]; then
|
||||
CI_DOCKER_EXTRA_PARAMS="--rm ${CI_DOCKER_EXTRA_PARAMS}"
|
||||
fi
|
||||
CI_COMMAND_PREFIX=("${CI_COMMAND_PREFIX[@]:-tensorflow/tools/ci_build/builds/with_the_same_user tensorflow/tools/ci_build/builds/configured ${CONTAINER_TYPE}}")


@ -64,6 +66,15 @@ else
  GPU_EXTRA_PARAMS=""
fi

# Determine the docker image name
DOCKER_IMG_NAME="${BUILD_TAG}.${CONTAINER_TYPE}"

# Under a Jenkins matrix build, the build tag may contain characters such as
# commas (,) and equal signs (=), which are not valid inside docker image names.
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | sed -e 's/=/_/g' -e 's/,/-/g')

# Convert to all lower-case, as required of Docker image names
DOCKER_IMG_NAME=$(echo "${DOCKER_IMG_NAME}" | tr '[:upper:]' '[:lower:]')
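# (For illustration, with a hypothetical tag: BUILD_TAG="jenkins-TF,PY=3" and
# CONTAINER_TYPE="GPU" yield "jenkins-TF,PY=3.GPU", which the two steps above
# rewrite to "jenkins-tf-py_3.gpu".)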

# Print arguments.
echo "WORKSPACE: ${WORKSPACE}"
@ -72,18 +83,17 @@ echo "COMMAND: ${COMMAND[@]}"
echo "CI_COMMAND_PREFIX: ${CI_COMMAND_PREFIX[@]}"
echo "CONTAINER_TYPE: ${CONTAINER_TYPE}"
echo "BUILD_TAG: ${BUILD_TAG}"
echo " (docker container name will be ${BUILD_TAG}.${CONTAINER_TYPE})"
echo " (docker container name will be ${DOCKER_IMG_NAME})"
echo ""


# Build the docker container.
echo "Building container (${BUILD_TAG}.${CONTAINER_TYPE})..."
docker build -t ${BUILD_TAG}.${CONTAINER_TYPE} \
echo "Building container (${DOCKER_IMG_NAME})..."
docker build -t ${DOCKER_IMG_NAME} \
    -f ${SCRIPT_DIR}/Dockerfile.${CONTAINER_TYPE} ${SCRIPT_DIR}


# Run the command inside the container.
echo "Running '${COMMAND[@]}' inside ${BUILD_TAG}.${CONTAINER_TYPE}..."
echo "Running '${COMMAND[@]}' inside ${DOCKER_IMG_NAME}..."
mkdir -p ${WORKSPACE}/bazel-ci_build-cache
docker run \
    -v ${WORKSPACE}/bazel-ci_build-cache:${WORKSPACE}/bazel-ci_build-cache \
@ -96,6 +106,6 @@ docker run \
    -w /tensorflow \
    ${GPU_EXTRA_PARAMS} \
    ${CI_DOCKER_EXTRA_PARAMS[@]} \
    "${BUILD_TAG}.${CONTAINER_TYPE}" \
    "${DOCKER_IMG_NAME}" \
    ${CI_COMMAND_PREFIX[@]} \
    ${COMMAND[@]}
200
tensorflow/tools/ci_build/ci_parameterized_build.sh
Executable file
@ -0,0 +1,200 @@
#!/usr/bin/env bash
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Usage:
#   ci_parameterized_build.sh
#
# The script obeys the following required environment variables:
#   TF_BUILD_CONTAINER_TYPE:  (CPU | GPU | ANDROID)
#   TF_BUILD_PYTHON_VERSION:  (PYTHON2 | PYTHON3)
#   TF_BUILD_IS_OPT:          (NO_OPT | OPT)
#   TF_BUILD_IS_PIP:          (NO_PIP | PIP)
#
# Note: certain combinations of parameter values are regarded as invalid and
# will cause the script to exit with code 0. For example:
#   NO_OPT & PIP  (PIP builds should always use OPT)
#   ANDROID & PIP (Android and PIP builds are mutually exclusive)
#
# Additionally, the script obeys the following optional environment variables:
#   TF_BUILD_DRY_RUN:  If set to any non-empty value other than "0", the
#                      script will just generate and print the final command,
#                      but not actually run it.
#   TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS:
#                      String appended to the content of CI_DOCKER_EXTRA_PARAMS
#   TF_BUILD_APPEND_ARGUMENTS:
#                      Additional command-line arguments for the bazel,
#                      pip.sh or android.sh command
#   TF_BUILD_BAZEL_TARGET:
#                      Used to override the default bazel build target:
#                      //tensorflow/...
#
# This script can be used by Jenkins parameterized / matrix builds.
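# Example invocation (illustrative values; assumes a Jenkins-style
# environment where the TF_BUILD_* variables come from the build matrix):
#   TF_BUILD_CONTAINER_TYPE=GPU TF_BUILD_PYTHON_VERSION=PYTHON2 \
#   TF_BUILD_IS_OPT=OPT TF_BUILD_IS_PIP=NO_PIP \
#   tensorflow/tools/ci_build/ci_parameterized_build.sh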

# Helper function: Convert to lower case
to_lower () {
  echo "$1" | tr '[:upper:]' '[:lower:]'
}

# Helper function: Strip leading and trailing whitespaces
str_strip () {
  echo -e "$1" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//'
}
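# (For illustration: to_lower "GPU" prints "gpu";
#  str_strip "  -c opt " prints "-c opt".)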


##########################################################
# Default configuration
CI_BUILD_DIR="tensorflow/tools/ci_build"

# Command to call when Docker is available
DOCKER_MAIN_CMD="${CI_BUILD_DIR}/ci_build.sh"
# Command to call when Docker is unavailable
NO_DOCKER_MAIN_CMD="${CI_BUILD_DIR}/builds/configured"

# Additional option flags to apply when Docker is unavailable (e.g., on Mac)
NO_DOCKER_OPT_FLAG="--linkopt=-headerpad_max_install_names"

BAZEL_CMD="bazel test"
PIP_CMD="${CI_BUILD_DIR}/builds/pip.sh"
ANDROID_CMD="${CI_BUILD_DIR}/builds/android.sh"

BAZEL_TARGET="//tensorflow/..."
##########################################################

# Convert all the required environment variables to lower case
TF_BUILD_CONTAINER_TYPE=$(to_lower ${TF_BUILD_CONTAINER_TYPE})
TF_BUILD_PYTHON_VERSION=$(to_lower ${TF_BUILD_PYTHON_VERSION})
TF_BUILD_IS_OPT=$(to_lower ${TF_BUILD_IS_OPT})
TF_BUILD_IS_PIP=$(to_lower ${TF_BUILD_IS_PIP})

# Print parameter values
echo "Required build parameters:"
echo "  TF_BUILD_CONTAINER_TYPE=${TF_BUILD_CONTAINER_TYPE}"
echo "  TF_BUILD_PYTHON_VERSION=${TF_BUILD_PYTHON_VERSION}"
echo "  TF_BUILD_IS_OPT=${TF_BUILD_IS_OPT}"
echo "  TF_BUILD_IS_PIP=${TF_BUILD_IS_PIP}"
echo "Optional build parameters:"
echo "  TF_BUILD_DRY_RUN=${TF_BUILD_DRY_RUN}"
echo "  TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS="\
"${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS}"
echo "  TF_BUILD_APPEND_ARGUMENTS=${TF_BUILD_APPEND_ARGUMENTS}"
echo "  TF_BUILD_BAZEL_TARGET=${TF_BUILD_BAZEL_TARGET}"

# Process container type
CTYPE=${TF_BUILD_CONTAINER_TYPE}
OPT_FLAG=""
if [[ ${CTYPE} == "cpu" ]]; then
  :
elif [[ ${CTYPE} == "gpu" ]]; then
  OPT_FLAG="--config=cuda"
elif [[ ${CTYPE} == "android" ]]; then
  :
else
  echo "Unrecognized value in TF_BUILD_CONTAINER_TYPE: "\
"\"${TF_BUILD_CONTAINER_TYPE}\""
  exit 1
fi

EXTRA_PARAMS=""

# Determine if Docker is available
MAIN_CMD=${DOCKER_MAIN_CMD}
if [[ -z "$(which docker)" ]]; then
  echo "It appears that Docker is not available on this system. "\
"Will perform build without Docker."
echo "In addition, the additional option flags will be applied to the build:"
|
||||
echo " ${NO_DOCKER_OPT_FLAG}"
|
||||
MAIN_CMD=${NO_DOCKER_MAIN_CMD}
|
||||
OPT_FLAG="${OPT_FLAG} ${NO_DOCKER_OPT_FLAG}"
|
||||
|
||||
fi
|
||||
|
||||
# Process Bazel "-c opt" flag
|
||||
if [[ ${TF_BUILD_IS_OPT} == "no_opt" ]]; then
|
||||
# PIP builds are done only with the -c opt flag
|
||||
if [[ ${TF_BUILD_IS_PIP} == "pip" ]]; then
|
||||
echo "Skipping parameter combination: ${TF_BUILD_IS_OPT} & "\
|
||||
"${TF_BUILD_IS_PIP}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
elif [[ ${TF_BUILD_IS_OPT} == "opt" ]]; then
|
||||
OPT_FLAG="${OPT_FLAG} -c opt"
|
||||
else
|
||||
echo "Unrecognized value in TF_BUILD_IS_OPT: \"${TF_BUILD_IS_OPT}\""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Strip whitespaces from OPT_FLAG
|
||||
OPT_FLAG=$(str_strip "${OPT_FLAG}")
|
||||
|
||||
# Process PIP install-test option
|
||||
if [[ ${TF_BUILD_IS_PIP} == "no_pip" ]]; then
|
||||
# Process optional bazel target override
|
||||
if [[ ! -z "${TF_BUILD_BAZEL_TARGET}" ]]; then
|
||||
BAZEL_TARGET=${TF_BUILD_BAZEL_TARGET}
|
||||
fi
|
||||
|
||||
if [[ ${CTYPE} == "cpu" ]] || [[ ${CTYPE} == "gpu" ]]; then
|
||||
# Run Bazel
|
||||
MAIN_CMD="${MAIN_CMD} ${CTYPE} ${BAZEL_CMD} ${OPT_FLAG} "\
|
||||
"${TF_BUILD_APPEND_ARGUMENTS} ${BAZEL_TARGET}"
|
||||
elif [[ ${CTYPE} == "android" ]]; then
|
||||
MAIN_CMD="${MAIN_CMD} ${CTYPE} ${ANDROID_CMD} ${OPT_FLAG} "
|
||||
fi
|
||||
elif [[ ${TF_BUILD_IS_PIP} == "pip" ]]; then
|
||||
# Android builds conflict with PIP builds
|
||||
if [[ ${CTYPE} == "android" ]]; then
|
||||
echo "Skipping parameter combination: ${TF_BUILD_IS_PIP} & "\
|
||||
"${TF_BUILD_CONTAINER_TYPE}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
MAIN_CMD="${MAIN_CMD} ${CTYPE} ${PIP_CMD} ${CTYPE} "\
|
||||
"${TF_BUILD_APPEND_ARGUMENTS}"
|
||||
else
|
||||
echo "Unrecognized value in TF_BUILD_IS_PIP: \"${TF_BUILD_IS_PIP}\""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Process Python version
|
||||
if [[ ${TF_BUILD_PYTHON_VERSION} == "python2" ]]; then
|
||||
:
|
||||
elif [[ ${TF_BUILD_PYTHON_VERSION} == "python3" ]]; then
|
||||
EXTRA_PARAMS="${EXTRA_PARAMS} -e PYTHON_BIN_PATH=/usr/bin/python3"
|
||||
else
|
||||
echo "Unrecognized value in TF_BUILD_PYTHON_VERSION: "\
|
||||
"\"${TF_BUILD_PYTHON_VERSION}\""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Append additional Docker extra parameters
|
||||
EXTRA_PARAMS="${EXTRA_PARAMS} ${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS}"
|
||||
|
||||
# Strip leading and trailing whitespaces
|
||||
EXTRA_PARAMS=$(str_strip "${EXTRA_PARAMS}")
|
||||
|
||||
# Finally, do a dry run or call the command
|
||||
echo "Final command assembled by parameterized build: "
|
||||
echo "CI_DOCKER_EXTRA_PARAMS=\"${EXTRA_PARAMS}\" ${MAIN_CMD}"
|
||||
if [[ ! -z "${TF_BUILD_DRY_RUN}" ]] && [[ ${TF_BUILD_DRY_RUN} != "0" ]]; then
|
||||
# Do a dry run: just print the final command
|
||||
echo "*** This is a DRY RUN ***"
|
||||
else
|
||||
# Call the command
|
||||
echo "Executing final command..."
|
||||
CI_DOCKER_EXTRA_PARAMS="${EXTRA_PARAMS}" ${MAIN_CMD}
|
||||
fi
|
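# (For illustration: a CPU / OPT / NO_PIP run with Docker available would
# assemble roughly the following final command:
#   CI_DOCKER_EXTRA_PARAMS="" tensorflow/tools/ci_build/ci_build.sh cpu \
#     bazel test -c opt //tensorflow/...)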
@ -19,6 +19,6 @@ genrule(
    outs = [
        "python_checked",
    ],
    cmd = "OUTPUTDIR=\"$(@D)/\"; ./util/python/python_config.sh --check && touch $$OUTPUTDIR/python_checked",
    cmd = "OUTPUTDIR=\"$(@D)/\"; $(location :python_config.sh) --check && touch $$OUTPUTDIR/python_checked",
    local = 1,
)
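# (Editorial note on the change above: Bazel's $(location :python_config.sh)
# expands to the actual path of the script at execution time, so the genrule
# command no longer assumes it is run from the workspace root.)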
@ -16,7 +16,14 @@

set -e -o errexit

EXPECTED_PATHS="util/python/python_include util/python/python_lib third_party/py/numpy/numpy_include"
# Prefix expected paths with ./ locally and external/reponame/ for remote repos.
# TODO(kchodorow): remove once runfiles paths are fixed, see
# https://github.com/bazelbuild/bazel/issues/848.
script_path=$(dirname $(dirname $(dirname "$0")))
script_path=${script_path:-.}
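# (For illustration: if invoked as ./util/python/python_config.sh, the three
# nested dirname calls above yield ".", and the ":-." default guards the
# case where script_path would otherwise be empty.)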
EXPECTED_PATHS="$script_path/util/python/python_include"\
|
||||
" $script_path/util/python/python_lib"\
|
||||
" $script_path/third_party/py/numpy/numpy_include"
|
||||
|
||||
function main {
|
||||
argument="$1"
|
||||
|