Minor formatting fixes.

Change: 113582098
Vijay Vasudevan 2016-02-01 16:39:16 -08:00 committed by Manjunath Kudlur
parent 5ff6d34a05
commit a8d2f0983e
42 changed files with 354 additions and 216 deletions

View File

@@ -50,3 +50,4 @@ Hello, TensorFlow!
* [TensorFlow website](http://tensorflow.org)
* [TensorFlow whitepaper](http://download.tensorflow.org/paper/whitepaper2015.pdf)
+ * [Tensorflow MOOC on Udacity] (https://www.udacity.com/course/deep-learning--ud730)

View File

@@ -114,7 +114,7 @@ void ConcurrentSteps(const Options* opts, int session_index) {
outputs.clear();
TF_CHECK_OK(
session->Run({{"x", x}}, {"y:0", "y_normalized:0"}, {}, &outputs));
- CHECK_EQ(2, outputs.size());
+ CHECK_EQ(size_t{2}, outputs.size());
const Tensor& y = outputs[0];
const Tensor& y_norm = outputs[1];

View File

@@ -115,9 +115,10 @@ Graph* GetConstantGraph(const Graph* orig_graph,
already_added.insert(added);
for (const Edge* in_edge : n->in_edges()) {
Node* in = in_edge->src();
- CHECK_GT(node_map.count(in), 0) << n->DebugString() << " <-"
- << in->DebugString();
- CHECK_GT(already_added.count(node_map[in]), 0) << in->DebugString();
+ CHECK_GT(node_map.count(in), size_t{0}) << n->DebugString() << " <-"
+ << in->DebugString();
+ CHECK_GT(already_added.count(node_map[in]), size_t{0})
+ << in->DebugString();
constant_graph->AddEdge(node_map[in], in_edge->src_output(), added,
in_edge->dst_input());
}

View File

@@ -116,7 +116,7 @@ Device* DeviceFactory::NewDevice(const string& type,
(*opt.config.mutable_device_count())[type] = 1;
std::vector<Device*> devices;
device_factory->CreateDevices(opt, name_prefix, &devices);
- CHECK_EQ(devices.size(), 1);
+ CHECK_EQ(devices.size(), size_t{1});
return devices[0];
}

View File

@@ -416,7 +416,7 @@ REGISTER_KERNEL_BUILDER(Name(kGradientOp).Device(DEVICE_GPU),
const FunctionBody* FunctionLibraryRuntimeImpl::GetFunctionBody(Handle h) {
mutex_lock l(mu_);
- CHECK_LE(0, h);
+ CHECK_LE(static_cast<Handle>(0), h);
CHECK_LT(h, func_graphs_.size());
return func_graphs_[h];
}

View File

@@ -153,7 +153,7 @@ void* GPUBFCAllocator::AllocateRawInternal(size_t unused_alignment,
// allocate multiples of 256 bytes so all memory addresses are
// nicely byte aligned.
size_t rounded_bytes = (256 * ((num_bytes + 255) / 256));
- DCHECK_EQ(0, rounded_bytes % 256);
+ DCHECK_EQ(size_t{0}, rounded_bytes % 256);
// The BFC allocator tries to find the best fit first.
//

View File

@@ -171,7 +171,7 @@ bool GPURegionAllocator::ExpandPool(Pool* pool, size_t chunk_size,
bool dump_log_on_failure) {
VLOG(1) << "ExpandPool of " << chunk_size << " from " << pool->num_chunks
<< " current members";
- DCHECK_NE(0, chunk_size);
+ DCHECK_NE(size_t{0}, chunk_size);
// If chunk_size is < 4096, double the pool size. Otherwise
// just increase by one.
int num_chunks = pool->num_chunks;

View File

@@ -39,7 +39,7 @@ PoolAllocator::PoolAllocator(size_t pool_size_limit, bool auto_resize,
size_rounder_(size_rounder),
allocation_begun_(false) {
if (auto_resize) {
- CHECK_LT(0, pool_size_limit)
+ CHECK_LT(size_t{0}, pool_size_limit)
<< "size limit must be > 0 if auto_resize is true.";
}
}

View File

@@ -157,7 +157,7 @@ Status BuildInputArgIndex(const OpDef::ArgDef& arg_def,
bool is_type_list;
DataTypeVector dtypes;
TF_RETURN_IF_ERROR(ArgNumType(attr_values, arg_def, &is_type_list, &dtypes));
- CHECK_GE(dtypes.size(), 1);
+ CHECK_GE(dtypes.size(), size_t{1});
GraphDef* gdef = &result->gdef;
int arg_index = gdef->node_size();
TF_RETURN_IF_ERROR(AddArgName(name_info, arg_def.name(),
@@ -312,7 +312,7 @@ Status AddReturnNode(const OpDef::ArgDef& ret_def,
bool is_type_list;
DataTypeVector dtypes;
TF_RETURN_IF_ERROR(ArgNumType(attrs, ret_def, &is_type_list, &dtypes));
- CHECK_GE(dtypes.size(), 1);
+ CHECK_GE(dtypes.size(), size_t{1});
const NameInfoItem* item = gtl::FindOrNull(name_info, ret_def.name());
if (item == nullptr) {
return errors::InvalidArgument("ret is not found.");

View File

@@ -127,7 +127,7 @@ class FunctionDefHelper {
n.attr.push_back({"dtype", dtype});
int64 num = vals.size();
Tensor t(dtype, TensorShape({num}));
- for (int i = 0; i < vals.size(); ++i) {
+ for (size_t i = 0; i < vals.size(); ++i) {
t.flat<T>()(i) = vals[i];
}
n.attr.push_back({"value", t});

View File

@@ -440,7 +440,7 @@ typename TTypes<T, NDIMS>::Tensor Tensor::shaped(
CHECK_EQ(NDIMS, new_sizes.size());
int64 new_num_elements = 1;
Eigen::array<Eigen::DenseIndex, NDIMS> dims;
- for (int d = 0; d < NDIMS; d++) {
+ for (size_t d = 0; d < NDIMS; d++) {
new_num_elements *= new_sizes[d];
dims[d] = new_sizes[d];
}
@@ -455,7 +455,7 @@ typename TTypes<T, NDIMS>::UnalignedTensor Tensor::unaligned_shaped(
CHECK_EQ(NDIMS, new_sizes.size());
int64 new_num_elements = 1;
Eigen::array<Eigen::DenseIndex, NDIMS> dims;
- for (int d = 0; d < NDIMS; d++) {
+ for (size_t d = 0; d < NDIMS; d++) {
new_num_elements *= new_sizes[d];
dims[d] = new_sizes[d];
}
@@ -471,7 +471,7 @@ typename TTypes<T, NDIMS>::ConstTensor Tensor::shaped(
CHECK_EQ(NDIMS, new_sizes.size());
int64 new_num_elements = 1;
Eigen::array<Eigen::DenseIndex, NDIMS> dims;
- for (int d = 0; d < NDIMS; d++) {
+ for (size_t d = 0; d < NDIMS; d++) {
new_num_elements *= new_sizes[d];
dims[d] = new_sizes[d];
}
@@ -486,7 +486,7 @@ typename TTypes<T, NDIMS>::UnalignedConstTensor Tensor::unaligned_shaped(
CHECK_EQ(NDIMS, new_sizes.size());
int64 new_num_elements = 1;
Eigen::array<Eigen::DenseIndex, NDIMS> dims;
- for (int d = 0; d < NDIMS; d++) {
+ for (size_t d = 0; d < NDIMS; d++) {
new_num_elements *= new_sizes[d];
dims[d] = new_sizes[d];
}

View File

@@ -41,7 +41,7 @@ Tensor DeepCopy(const Tensor& other) {
}
Tensor Concat(const gtl::ArraySlice<Tensor>& tensors) {
- CHECK_GT(tensors.size(), 0);
+ CHECK_GT(tensors.size(), size_t{0});
int64 total_dim0_size = 0;
for (const Tensor& tensor : tensors) {
CHECK_GT(tensor.dims(), 0);

View File

@@ -253,11 +253,11 @@ const Edge* Graph::AddEdge(Node* source, int x, Node* dest, int y) {
void Graph::RemoveEdge(const Edge* e) {
DCHECK(IsValidNode(e->src_)) << e->src_->DebugString();
DCHECK(IsValidNode(e->dst_)) << e->dst_->DebugString();
- CHECK_EQ(e->src_->out_edges_.erase(e), 1);
- CHECK_EQ(e->dst_->in_edges_.erase(e), 1);
+ CHECK_EQ(e->src_->out_edges_.erase(e), size_t{1});
+ CHECK_EQ(e->dst_->in_edges_.erase(e), size_t{1});
CHECK_EQ(e, edges_[e->id_]);
- CHECK_EQ(edge_set_.erase(e), 1);
+ CHECK_EQ(edge_set_.erase(e), size_t{1});
edges_[e->id_] = nullptr;
Edge* del = const_cast<Edge*>(e);

View File

@@ -614,7 +614,7 @@ struct BinaryFunctor {
template <int NDIMS>
bool AllOne(const typename Eigen::array<Eigen::DenseIndex, NDIMS>& a) {
- for (int i = 0; i < a.size(); ++i) {
+ for (size_t i = 0; i < a.size(); ++i) {
if (a[i] != 1) return false;
}
return true;

View File

@@ -33,7 +33,7 @@ class DiagonalGenerator {
T operator()(
const Eigen::array<Eigen::DenseIndex, DoubleNumDims>& coordinates) const {
Eigen::array<Eigen::DenseIndex, NumDims> index;
- for (int i = 0; i < NumDims; ++i) {
+ for (size_t i = 0; i < NumDims; ++i) {
if (coordinates[i] != coordinates[NumDims + i]) {
return T(0);
}

View File

@@ -144,7 +144,7 @@ class EditDistanceOp : public OpKernel {
std::iota(group_dims.begin(), group_dims.end(), 0);
TensorShape output_shape;
- for (int d = 0; d < group_dims.size(); ++d) {
+ for (size_t d = 0; d < group_dims.size(); ++d) {
output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d),
truth_st_shape.dim_size(d)));
}

View File

@@ -36,7 +36,7 @@ FIFOQueue::FIFOQueue(int capacity, const DataTypeVector& component_dtypes,
: TypedQueue(capacity, component_dtypes, component_shapes, name) {}
void FIFOQueue::DequeueLocked(OpKernelContext* ctx, Tuple* tuple) {
- DCHECK_GT(queues_[0].size(), 0);
+ DCHECK_GT(queues_[0].size(), size_t{0});
(*tuple).reserve(num_components());
for (int i = 0; i < num_components(); ++i) {
(*tuple).push_back(*queues_[i][0].AccessTensor(ctx));

View File

@@ -76,7 +76,7 @@ class HashTable : public InitializableLookupTable {
const auto key_values = keys.flat<K>();
const auto value_values = values.flat<V>();
- for (size_t i = 0; i < key_values.size(); ++i) {
+ for (int i = 0; i < key_values.size(); ++i) {
const K& key = key_values(i);
const V& value = value_values(i);
const V& previous_value = gtl::LookupOrInsert(table_.get(), key, value);
@@ -95,7 +95,7 @@ class HashTable : public InitializableLookupTable {
const auto key_values = key.flat<K>();
auto value_values = value->flat<V>();
- for (size_t i = 0; i < key_values.size(); ++i) {
+ for (int i = 0; i < key_values.size(); ++i) {
value_values(i) =
gtl::FindWithDefault(*table_, key_values(i), default_val);
}

View File

@@ -285,7 +285,7 @@ Status HandleElementToLargerSlice(const Tensor& element, Tensor* parent,
slice_indices[0] = index;
Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_size;
slice_size[0] = 1;
- for (int i = 1; i < slice_size.size(); ++i) {
+ for (size_t i = 1; i < slice_size.size(); ++i) {
slice_size[i] = element_t.dimension(i - 1);
}
parent_t.slice(slice_indices, slice_size) = element_t.reshape(slice_size);

View File

@@ -107,7 +107,7 @@ Status RandomShuffleQueue::Initialize() {
}
void RandomShuffleQueue::DequeueLocked(OpKernelContext* ctx, Tuple* tuple) {
- DCHECK_GT(queues_[0].size(), 0);
+ DCHECK_GT(queues_[0].size(), size_t{0});
int64 index = generator_() % queues_[0].size();
(*tuple).reserve(num_components());
for (int i = 0; i < num_components(); ++i) {

View File

@@ -97,7 +97,7 @@ void RangeSampler::SampleBatchGetExpectedCountAvoid(
}
}
} else {
- CHECK_EQ(avoided_values.size(), 0)
+ CHECK_EQ(avoided_values.size(), size_t{0})
<< "avoided_values only supported with unique=true";
for (int i = 0; i < batch_size; i++) {
batch[i] = Sample(rnd);
@@ -138,7 +138,7 @@ void AllSampler::SampleBatchGetExpectedCountAvoid(
batch_expected_count[i] = 1;
}
}
- CHECK_EQ(0, avoided_values.size());
+ CHECK_EQ(size_t{0}, avoided_values.size());
CHECK_EQ(extras.size(), extras_expected_count.size());
for (size_t i = 0; i < extras.size(); i++) {
extras_expected_count[i] = 1;

View File

@@ -67,7 +67,7 @@ void CheckErrors(OpKernelContext* context, int batch_dim, int seq_dim) {
"), ", "(", seq_lens.NumElements(),
" vs. ", input.dim_size(batch_dim)));
- for (int d = 0; d < seq_lens_vec.size(); ++d) {
+ for (size_t d = 0; d < seq_lens_vec.size(); ++d) {
OP_REQUIRES(context, seq_lens_vec[d] >= 0,
errors::InvalidArgument("seq_lens(", d, ") < 0"));
OP_REQUIRES(context, seq_lens_vec[d] <= input.dim_size(seq_dim),

View File

@@ -466,7 +466,7 @@ class SparseSegmentGradOpBase : public OpKernel {
for (int64 i = 0; i < N; ++i) {
scaling[segment_vec(i)] += 1;
}
- for (int i = 0; i < scaling.size(); ++i) {
+ for (size_t i = 0; i < scaling.size(); ++i) {
if (is_sqrtn_) {
scaling[i] = 1.0 / sqrt(std::max(scaling[i], 1.0));
} else {

View File

@@ -803,7 +803,7 @@ inline void SparseMatMulOp::ComputeBlockSizes(const ConstMatrixMap& left,
*JB = std::max(1, static_cast<int>(sqrt(num_threads) / 2.0));
*IB = 8 * *JB;
- DCHECK_EQ(N * sizeof(float) % 64, 0);
+ DCHECK_EQ(N * sizeof(float) % 64, size_t{0});
}
// Here is a an overview of the SparseMatMul code. Note that we assume that the

View File

@@ -48,7 +48,7 @@ class StringToNumberOp : public OpKernel {
&output_tensor));
auto output_flat = output_tensor->flat<OutputType>();
- for (std::size_t i = 0; i < input_flat.size(); ++i) {
+ for (int i = 0; i < input_flat.size(); ++i) {
const char* s = input_flat(i).data();
Convert(s, &output_flat(i), context);
}

View File

@@ -77,7 +77,7 @@ bool Arena::SatisfyAlignment(size_t alignment) {
freestart_ += waste;
remaining_ -= waste;
}
- DCHECK_EQ(0, reinterpret_cast<size_t>(freestart_) & (alignment - 1));
+ DCHECK_EQ(size_t{0}, reinterpret_cast<size_t>(freestart_) & (alignment - 1));
return true;
}
@@ -168,7 +168,7 @@ Arena::AllocatedBlock* Arena::AllocNewBlock(const size_t block_size,
const uint32 adjusted_alignment =
(alignment > 1 ? LeastCommonMultiple(alignment, kDefaultAlignment) : 1);
- CHECK_LE(adjusted_alignment, 1 << 20)
+ CHECK_LE(adjusted_alignment, static_cast<uint32>(1 << 20))
<< "Alignment on boundaries greater than 1MB not supported.";
// If block_size > alignment we force block_size to be a multiple

View File

@@ -59,7 +59,7 @@ Histogram::Histogram(gtl::ArraySlice<double> custom_bucket_limits)
custom_bucket_limits.end()),
bucket_limits_(custom_bucket_limits_) {
#ifndef NDEBUG
- DCHECK_GT(bucket_limits_.size(), 0);
+ DCHECK_GT(bucket_limits_.size(), size_t{0});
// Verify that the bucket boundaries are strictly increasing
for (size_t i = 1; i < bucket_limits_.size(); i++) {
DCHECK_GT(bucket_limits_[i], bucket_limits_[i - 1]);

View File

@@ -540,7 +540,7 @@ bool CompressInternal(const uint8* srcdata, int width, int height,
row_pointer[0] = reinterpret_cast<JSAMPLE*>(const_cast<JSAMPLE*>(r));
}
}
- CHECK_EQ(jpeg_write_scanlines(&cinfo, row_pointer, 1), 1);
+ CHECK_EQ(jpeg_write_scanlines(&cinfo, row_pointer, 1), 1u);
}
jpeg_finish_compress(&cinfo);

View File

@@ -134,7 +134,7 @@ std::vector<string> Split(StringPiece text, char delim, Predicate p) {
std::vector<string> result;
int token_start = 0;
if (!text.empty()) {
- for (int i = 0; i < text.size() + 1; i++) {
+ for (size_t i = 0; i < text.size() + 1; i++) {
if ((i == text.size()) || (text[i] == delim)) {
StringPiece token(text.data() + token_start, i - token_start);
if (p(token)) {

View File

@@ -48,7 +48,7 @@ class DimComparator {
inline DimComparator(const TTypes<int64>::Matrix& ix,
const VarDimArray& order, int dims)
: ix_(ix), order_(order), dims_(dims) {
- CHECK_GT(order.size(), 0) << "Must order using at least one index";
+ CHECK_GT(order.size(), size_t{0}) << "Must order using at least one index";
CHECK_LE(order.size(), dims_) << "Can only sort up to dims";
for (size_t d = 0; d < order.size(); ++d) {
CHECK_GE(order[d], 0);

View File

@@ -321,7 +321,7 @@ bool SparseTensor::ToDense(Tensor* out, bool initialize) {
strides[d] = strides[d + 1] * out_shape.dim_size(d + 1);
}
- for (std::size_t n = 0; n < vals_t.dimension(0); ++n) {
+ for (int n = 0; n < vals_t.dimension(0); ++n) {
bool invalid_dims = false;
int64 ix = 0;
for (int d = 0; d < dims_; ++d) {
@@ -340,7 +340,7 @@ bool SparseTensor::ToDense(Tensor* out, bool initialize) {
template <typename T>
SparseTensor SparseTensor::Concat(
const gtl::ArraySlice<SparseTensor>& tensors) {
- CHECK_GE(tensors.size(), 1) << "Cannot concat 0 SparseTensors";
+ CHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors";
const int dims = tensors[0].dims_;
CHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors";
auto order_0 = tensors[0].order();

View File

@@ -92,7 +92,7 @@ const TensorSliceReader* TensorSliceReaderCache::GetReader(
} else {
delete tmp_reader;
}
- CHECK_EQ(1, still_opening_.erase(filepattern));
+ CHECK_EQ(size_t{1}, still_opening_.erase(filepattern));
VLOG(1) << "Cached TensorSliceReader for " << filepattern << ": " << reader;
} else {
auto cached_val = readers_[filepattern];

View File

@@ -113,7 +113,7 @@
" filename, _ = urlretrieve(url + filename, filename)\n",
" statinfo = os.stat(filename)\n",
" if statinfo.st_size == expected_bytes:\n",
- " print 'Found and verified', filename\n",
+ " print('Found and verified', filename)\n",
" else:\n",
" raise Exception(\n",
" 'Failed to verify' + filename + '. Can you get to it with a browser?')\n",
@@ -237,9 +237,9 @@
"colab_type": "text"
},
"source": [
- "Now let's load the data in a more manageable format.\n",
+ "Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.\n",
"\n",
- "We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road. The labels will be stored into a separate array of integers 0 through 9.\n",
+ "We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road. \n",
"\n",
"A few images might not be readable, we'll just skip them."
]
@@ -283,83 +283,143 @@
"image_size = 28 # Pixel width and height.\n",
"pixel_depth = 255.0 # Number of levels per pixel.\n",
"\n",
- "def load(data_folders, min_num_images, max_num_images):\n",
- " dataset = np.ndarray(\n",
- " shape=(max_num_images, image_size, image_size), dtype=np.float32)\n",
- " labels = np.ndarray(shape=(max_num_images), dtype=np.int32)\n",
- " label_index = 0\n",
- " image_index = 0\n",
- " for folder in data_folders:\n",
- " print(folder)\n",
+ "def load_letter(folder, min_num_images):\n",
+ " image_files = os.listdir(folder)\n",
+ " dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n",
+ " dtype=np.float32)\n",
+ " image_index = 0\n",
+ " print folder\n",
" for image in os.listdir(folder):\n",
- " if image_index >= max_num_images:\n",
- " raise Exception('More images than expected: %d >= %d' % (\n",
- " image_index, max_num_images))\n",
" image_file = os.path.join(folder, image)\n",
" try:\n",
- " image_data = (ndimage.imread(image_file).astype(float) -\n",
+ " image_data = (ndimage.imread(image_file).astype(float) - \n",
" pixel_depth / 2) / pixel_depth\n",
" if image_data.shape != (image_size, image_size):\n",
" raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n",
" dataset[image_index, :, :] = image_data\n",
- " labels[image_index] = label_index\n",
" image_index += 1\n",
" except IOError as e:\n",
" print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n",
- " label_index += 1\n",
+ " \n",
" num_images = image_index\n",
" dataset = dataset[0:num_images, :, :]\n",
- " labels = labels[0:num_images]\n",
- " if num_images < min_num_images:\n",
- " raise Exception('Many fewer images than expected: %d < %d' % (\n",
- " num_images, min_num_images))\n",
+ " if num_images < min_num_images:\n",
+ " raise Exception('Many fewer images than expected: %d < %d' % \n",
+ " (num_images, min_num_images))\n",
+ " \n",
" print('Full dataset tensor:', dataset.shape)\n",
" print('Mean:', np.mean(dataset))\n",
" print('Standard deviation:', np.std(dataset))\n",
- " print('Labels:', labels.shape)\n",
- " return dataset, labels\n",
- "train_dataset, train_labels = load(train_folders, 450000, 550000)\n",
- "test_dataset, test_labels = load(test_folders, 18000, 20000)"
+ " return dataset\n",
+ " \n",
+ "def load(data_folders, min_num_images_per_class):\n",
+ " dataset_names = []\n",
+ " for folder in data_folders:\n",
+ " dataset = load_letter(folder, min_num_images_per_class)\n",
+ " set_filename = folder + '.pickle'\n",
+ " try:\n",
+ " with open(set_filename, 'wb') as f:\n",
+ " pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n",
+ " dataset_names.append(set_filename)\n",
+ " except Exception as e:\n",
+ " print('Unable to save data to', pickle_file, ':', e)\n",
+ " \n",
+ " return dataset_names\n",
+ "\n",
+ "train_datasets = load(train_folders, 45000)\n",
+ "test_datasets = load(test_folders, 1800)"
],
"outputs": [ "outputs": [
{ {
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"notMNIST_large/A\n", "notMNIST_large/A\n",
"Could not read: notMNIST_large/A/SG90IE11c3RhcmQgQlROIFBvc3Rlci50dGY=.png : cannot identify image file - it's ok, skipping.\n",
"Could not read: notMNIST_large/A/RnJlaWdodERpc3BCb29rSXRhbGljLnR0Zg==.png : cannot identify image file - it's ok, skipping.\n",
"Could not read: notMNIST_large/A/Um9tYW5hIEJvbGQucGZi.png : cannot identify image file - it's ok, skipping.\n", "Could not read: notMNIST_large/A/Um9tYW5hIEJvbGQucGZi.png : cannot identify image file - it's ok, skipping.\n",
"Could not read: notMNIST_large/A/RnJlaWdodERpc3BCb29rSXRhbGljLnR0Zg==.png : cannot identify image file - it's ok, skipping.\n",
"Could not read: notMNIST_large/A/SG90IE11c3RhcmQgQlROIFBvc3Rlci50dGY=.png : cannot identify image file - it's ok, skipping.\n",
"Full dataset tensor: (52909, 28, 28)\n",
"Mean: -0.12848\n",
"Standard deviation: 0.425576\n",
"notMNIST_large/B\n", "notMNIST_large/B\n",
"Could not read: notMNIST_large/B/TmlraXNFRi1TZW1pQm9sZEl0YWxpYy5vdGY=.png : cannot identify image file - it's ok, skipping.\n", "Could not read: notMNIST_large/B/TmlraXNFRi1TZW1pQm9sZEl0YWxpYy5vdGY=.png : cannot identify image file - it's ok, skipping.\n",
"Full dataset tensor: (52911, 28, 28)\n",
"Mean: -0.00755947\n",
"Standard deviation: 0.417272\n",
"notMNIST_large/C\n", "notMNIST_large/C\n",
"Full dataset tensor: (52912, 28, 28)\n",
"Mean: -0.142321\n",
"Standard deviation: 0.421305\n",
"notMNIST_large/D\n", "notMNIST_large/D\n",
"Could not read: notMNIST_large/D/VHJhbnNpdCBCb2xkLnR0Zg==.png : cannot identify image file - it's ok, skipping.\n", "Could not read: notMNIST_large/D/VHJhbnNpdCBCb2xkLnR0Zg==.png : cannot identify image file - it's ok, skipping.\n",
"Full dataset tensor: (52911, 28, 28)\n",
"Mean: -0.0574553\n",
"Standard deviation: 0.434072\n",
"notMNIST_large/E\n", "notMNIST_large/E\n",
"Full dataset tensor: (52912, 28, 28)\n",
"Mean: -0.0701406\n",
"Standard deviation: 0.42882\n",
"notMNIST_large/F\n", "notMNIST_large/F\n",
"Full dataset tensor: (52912, 28, 28)\n",
"Mean: -0.125914\n",
"Standard deviation: 0.429645\n",
"notMNIST_large/G\n", "notMNIST_large/G\n",
"Full dataset tensor: (52912, 28, 28)\n",
"Mean: -0.0947771\n",
"Standard deviation: 0.421674\n",
"notMNIST_large/H\n", "notMNIST_large/H\n",
"Full dataset tensor: (52912, 28, 28)\n",
"Mean: -0.0687667\n",
"Standard deviation: 0.430344\n",
"notMNIST_large/I\n", "notMNIST_large/I\n",
"Full dataset tensor: (52912, 28, 28)\n",
"Mean: 0.0307405\n",
"Standard deviation: 0.449686\n",
"notMNIST_large/J\n", "notMNIST_large/J\n",
"Full dataset tensor: (529114, 28, 28)\n", "Full dataset tensor: (52911, 28, 28)\n",
"Mean: -0.0816593\n", "Mean: -0.153479\n",
"Standard deviation: 0.454232\n", "Standard deviation: 0.397169\n",
"Labels: (529114,)\n",
"notMNIST_small/A\n", "notMNIST_small/A\n",
"Could not read: notMNIST_small/A/RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png : cannot identify image file - it's ok, skipping.\n", "Could not read: notMNIST_small/A/RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png : cannot identify image file - it's ok, skipping.\n",
"Full dataset tensor: (1872, 28, 28)\n",
"Mean: -0.132588\n",
"Standard deviation: 0.445923\n",
"notMNIST_small/B\n", "notMNIST_small/B\n",
"Full dataset tensor: (1873, 28, 28)\n",
"Mean: 0.00535619\n",
"Standard deviation: 0.457054\n",
"notMNIST_small/C\n", "notMNIST_small/C\n",
"Full dataset tensor: (1873, 28, 28)\n",
"Mean: -0.141489\n",
"Standard deviation: 0.441056\n",
"notMNIST_small/D\n", "notMNIST_small/D\n",
"Full dataset tensor: (1873, 28, 28)\n",
"Mean: -0.0492094\n",
"Standard deviation: 0.460477\n",
"notMNIST_small/E\n", "notMNIST_small/E\n",
"Full dataset tensor: (1873, 28, 28)\n",
"Mean: -0.0598952\n",
"Standard deviation: 0.456146\n",
"notMNIST_small/F\n", "notMNIST_small/F\n",
"Could not read: notMNIST_small/F/Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png : cannot identify image file - it's ok, skipping.\n", "Could not read: notMNIST_small/F/Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png : cannot identify image file - it's ok, skipping.\n",
"Full dataset tensor: (1872, 28, 28)\n",
"Mean: -0.118148\n",
"Standard deviation: 0.451134\n",
"notMNIST_small/G\n", "notMNIST_small/G\n",
"Full dataset tensor: (1872, 28, 28)\n",
"Mean: -0.092519\n",
"Standard deviation: 0.448468\n",
"notMNIST_small/H\n", "notMNIST_small/H\n",
"Full dataset tensor: (1872, 28, 28)\n",
"Mean: -0.0586729\n",
"Standard deviation: 0.457387\n",
"notMNIST_small/I\n", "notMNIST_small/I\n",
"Full dataset tensor: (1872, 28, 28)\n",
"Mean: 0.0526481\n",
"Standard deviation: 0.472657\n",
"notMNIST_small/J\n", "notMNIST_small/J\n",
"Full dataset tensor: (18724, 28, 28)\n", "Full dataset tensor: (1872, 28, 28)\n",
"Mean: -0.0746364\n", "Mean: -0.15167\n",
"Standard deviation: 0.458622\n", "Standard deviation: 0.449521\n"
"Labels: (18724,)\n"
], ],
"name": "stdout" "name": "stdout"
} }
@@ -382,6 +442,134 @@
"---"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "cYznx5jUwzoO",
"colab_type": "text"
},
"source": [
"---\n",
"Problem 3\n",
"---------\n",
"Another check: we expect the data to be balanced across classes. Verify that.\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "LA7M7K22ynCt",
"colab_type": "text"
},
"source": [
"Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune `train_size` as needed. The labels will be stored into a separate array of integers 0 through 9.\n",
"\n",
"Also create a validation dataset for hyperparameter tuning."
]
},
{
"cell_type": "code",
"metadata": {
"id": "s3mWgZLpyuzq",
"colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
"output_extras": [
{
"item_id": 1
}
]
},
"cellView": "both",
"executionInfo": {
"elapsed": 411281,
"status": "ok",
"timestamp": 1444485897869,
"user": {
"color": "#1FA15D",
"displayName": "Vincent Vanhoucke",
"isAnonymous": false,
"isMe": true,
"permissionId": "05076109866853157986",
"photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg",
"sessionId": "2a0a5e044bb03b66",
"userId": "102167687554210253930"
},
"user_tz": 420
},
"outputId": "8af66da6-902d-4719-bedc-7c9fb7ae7948"
},
"source": [
"def make_arrays(nb_rows, img_size):\n",
" if nb_rows:\n",
" dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)\n",
" labels = np.ndarray(nb_rows, dtype=np.int32)\n",
" else:\n",
" dataset, labels = None, None\n",
" return dataset, labels\n",
"\n",
"def merge_datasets(pickle_files, train_size, valid_size=0):\n",
" num_classes = len(pickle_files)\n",
" valid_dataset, valid_labels = make_arrays(valid_size, image_size)\n",
" train_dataset, train_labels = make_arrays(train_size, image_size)\n",
" vsize_per_class = valid_size // num_classes\n",
" tsize_per_class = train_size // num_classes\n",
" \n",
" start_v, start_t = 0, 0\n",
" end_v, end_t = vsize_per_class, tsize_per_class\n",
" end_l = vsize_per_class+tsize_per_class\n",
" for label, pickle_file in enumerate(pickle_files): \n",
" try:\n",
" with open(pickle_file, 'rb') as f:\n",
" letter_set = pickle.load(f)\n",
" if valid_dataset is not None:\n",
" valid_letter = letter_set[:vsize_per_class, :, :]\n",
" valid_dataset[start_v:end_v, :, :] = valid_letter\n",
" valid_labels[start_v:end_v] = label\n",
" start_v += vsize_per_class\n",
" end_v += vsize_per_class\n",
" \n",
" train_letter = letter_set[vsize_per_class:end_l, :, :]\n",
" train_dataset[start_t:end_t, :, :] = train_letter\n",
" train_labels[start_t:end_t] = label\n",
" start_t += tsize_per_class\n",
" end_t += tsize_per_class\n",
" except Exception as e:\n",
" print('Unable to process data from', pickle_file, ':', e)\n",
" raise\n",
" \n",
" return valid_dataset, valid_labels, train_dataset, train_labels\n",
" \n",
" \n",
"train_size = 200000\n",
"valid_size = 10000\n",
"test_size = 10000\n",
"\n",
"valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(train_datasets, train_size, valid_size)\n",
"__, __, test_dataset, test_labels = merge_datasets(test_datasets, test_size)\n",
"\n",
"print('Training:', train_dataset.shape, train_labels.shape)\n",
"print('Validation:', valid_dataset.shape, valid_labels.shape)\n",
"print('Testing:', test_dataset.shape, test_labels.shape)"
],
"outputs": [
{
"output_type": "stream",
"text": [
"Training (200000, 28, 28) (200000,)\n",
"Validation (10000, 28, 28) (10000,)\n",
"Testing (10000, 28, 28) (10000,)\n"
],
"name": "stdout"
}
],
"execution_count": 0
},
{
"cell_type": "markdown",
"metadata": {
@@ -426,98 +614,13 @@
},
"source": [
"---\n",
- "Problem 3\n",
+ "Problem 4\n",
"---------\n",
"Convince yourself that the data is still good after shuffling!\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "cYznx5jUwzoO",
"colab_type": "text"
},
"source": [
"---\n",
"Problem 4\n",
"---------\n",
"Another check: we expect the data to be balanced across classes. Verify that.\n",
"\n",
"---"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "LA7M7K22ynCt",
"colab_type": "text"
},
"source": [
"Prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed.\n",
"\n",
"Also create a validation dataset for hyperparameter tuning."
]
},
{
"cell_type": "code",
"metadata": {
"id": "s3mWgZLpyuzq",
"colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
"output_extras": [
{
"item_id": 1
}
]
},
"cellView": "both",
"executionInfo": {
"elapsed": 411281,
"status": "ok",
"timestamp": 1444485897869,
"user": {
"color": "#1FA15D",
"displayName": "Vincent Vanhoucke",
"isAnonymous": false,
"isMe": true,
"permissionId": "05076109866853157986",
"photoUrl": "//lh6.googleusercontent.com/-cCJa7dTDcgQ/AAAAAAAAAAI/AAAAAAAACgw/r2EZ_8oYer4/s50-c-k-no/photo.jpg",
"sessionId": "2a0a5e044bb03b66",
"userId": "102167687554210253930"
},
"user_tz": 420
},
"outputId": "8af66da6-902d-4719-bedc-7c9fb7ae7948"
},
"source": [
"train_size = 200000\n",
"valid_size = 10000\n",
"\n",
"valid_dataset = train_dataset[:valid_size,:,:]\n",
"valid_labels = train_labels[:valid_size]\n",
"train_dataset = train_dataset[valid_size:valid_size+train_size,:,:]\n",
"train_labels = train_labels[valid_size:valid_size+train_size]\n",
"print('Training', train_dataset.shape, train_labels.shape)\n",
"print('Validation', valid_dataset.shape, valid_labels.shape)"
],
"outputs": [
{
"output_type": "stream",
"text": [
"Training (200000, 28, 28) (200000,)\n",
"Validation (10000, 28, 28) (10000,)\n"
],
"name": "stdout"
}
],
"execution_count": 0
},
{
"cell_type": "markdown",
"metadata": {

View File

@@ -265,7 +265,7 @@
" [patch_size, patch_size, depth, depth], stddev=0.1))\n",
" layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))\n",
" layer3_weights = tf.Variable(tf.truncated_normal(\n",
- " [image_size / 4 * image_size / 4 * depth, num_hidden], stddev=0.1))\n",
+ " [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))\n",
" layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))\n",
" layer4_weights = tf.Variable(tf.truncated_normal(\n",
" [num_hidden, num_labels], stddev=0.1))\n",

View File

@@ -114,7 +114,7 @@
" if statinfo.st_size == expected_bytes:\n",
" print('Found and verified %s' % filename)\n",
" else:\n",
- " print statinfo.st_size\n",
+ " print(statinfo.st_size)\n",
" raise Exception(\n",
" 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n",
" return filename\n",
@@ -354,35 +354,31 @@
" data_index = (data_index + 1) % len(data)\n",
" return batch, labels\n",
"\n",
- "batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)\n",
- "for i in range(8):\n",
- " print('%d -> %d' % (batch[i], labels[i, 0]))\n",
- " print('%s -> %s' % (reverse_dictionary[batch[i]],\n",
- " reverse_dictionary[labels[i, 0]]))"
+ "print('data:', [reverse_dictionary[di] for di in data[:8]])\n",
+ "\n",
+ "for num_skips, skip_window in [(2, 1), (4, 2)]:\n",
+ " data_index = 0\n",
+ " batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)\n",
+ " print('\\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))\n",
+ " print(' batch:', [reverse_dictionary[bi] for bi in batch])\n",
+ " print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)])"
],
"outputs": [
{
- "output_type": "stream",
- "text": [
- " 3083 -> 5243\n",
- "originated -> anarchism\n",
- "3083 -> 12\n",
- "originated -> as\n",
- "12 -> 3083\n",
- "as -> originated\n",
- "12 -> 6\n",
- "as -> a\n",
- "6 -> 12\n",
- "a -> as\n",
- "6 -> 195\n",
- "a -> term\n",
- "195 -> 6\n",
- "term -> a\n",
- "195 -> 2\n",
- "term -> of\n"
- ],
- "name": "stdout"
- }
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "data: ['anarchism', 'originated', 'as', 'a', 'term', 'of', 'abuse', 'first']\n",
+ "\n",
+ "with num_skips = 2 and skip_window = 1:\n",
+ " batch: ['originated', 'originated', 'as', 'as', 'a', 'a', 'term', 'term']\n",
+ " labels: ['as', 'anarchism', 'a', 'originated', 'term', 'as', 'a', 'of']\n",
+ "\n",
+ "with num_skips = 4 and skip_window = 2:\n",
+ " batch: ['as', 'as', 'as', 'as', 'a', 'a', 'a', 'a']\n",
+ " labels: ['anarchism', 'originated', 'term', 'a', 'as', 'of', 'originated', 'term']\n"
+ ]
+ }
],
"execution_count": 0
},

View File

@@ -1,6 +1,8 @@
Assignments for Udacity Deep Learning class with TensorFlow
===========================================================
+ Course information can be found at https://www.udacity.com/course/deep-learning--ud730
Running the Docker container from the Google Cloud repository
-------------------------------------------------------------
@@ -44,8 +46,18 @@ Building a local Docker container
Running the local container
---------------------------
+ To run a disposable container:
docker run -p 8888:8888 -it --rm $USER/assignments
+ Note the above command will create an ephemeral container and all data stored in the container will be lost when the container stops.
+ To avoid losing work between sessions in the container, it is recommended that you mount the `tensorflow/examples/udacity` directory into the container:
+ docker run -p 8888:8888 -v </path/to/tensorflow/examples/udacity>:/notebooks -it --rm $USER/assignments
+ This will allow you to save work and have access to generated files on the host filesystem.
Pushing a Google Cloud release
------------------------------
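As a concrete illustration of the mount command added above (a sketch only; the host path assumes the repository was cloned under `$HOME/tensorflow`, so substitute wherever your checkout actually lives):

```bash
# Persist notebooks and generated files by mounting the host checkout into the container.
docker run -p 8888:8888 \
  -v $HOME/tensorflow/tensorflow/examples/udacity:/notebooks \
  -it --rm $USER/assignments
```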

View File

@@ -303,7 +303,9 @@ $ git clone --recurse-submodules https://github.com/tensorflow/tensorflow
```
`--recurse-submodules` is required to fetch the protobuf library that TensorFlow
- depends on.
+ depends on. Note that these instructions will install the latest master branch
+ of tensorflow. If you want to install a specific branch (such as a release branch),
+ pass `-b <branchname>` to the `git clone` command.
### Installation for Linux
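For instance (a sketch; `r0.7` is only an assumed example of a release branch name, substitute whichever branch you actually want):

```bash
# Clone a specific branch rather than master, still fetching the protobuf submodule.
git clone -b r0.7 --recurse-submodules https://github.com/tensorflow/tensorflow
```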

View File

@@ -263,6 +263,7 @@ tf_gen_op_wrapper_py(
py_library(
name = "functional_ops_lib",
srcs = ["ops/functional_ops.py"],
+ srcs_version = "PY2AND3",
deps = [
":functional_ops",
],

View File

@@ -520,7 +520,7 @@ def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
- "wx_plus_b" is used.
+ "xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.

View File

@@ -42,7 +42,7 @@ to docker caching. Individual builds are fast thanks to bazel caching.
2. Clone tensorflow repository.
```bash
- git clone https://github.com/tensorflow/tensorflow.git
+ git clone --recurse-submodules https://github.com/tensorflow/tensorflow.git
```
3. Go to tensorflow directory
@@ -77,6 +77,9 @@ tensorflow/tools/ci_build/ci_build.sh GPU tensorflow/tools/ci_build/builds/gpu_p
# build android example app
tensorflow/tools/ci_build/ci_build.sh ANDROID tensorflow/tools/ci_build/builds/android.sh
+ # run bash inside the container
+ CI_DOCKER_EXTRA_PARAMS='-it --rm' CI_COMMAND_PREFIX='' tensorflow/tools/ci_build/ci_build.sh CPU /bin/bash
```
**Note**: The set of jobs and how they are triggered is still evolving.
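The same override pattern documented above should also combine with the other container types; a sketch (assuming a GPU-capable host with the GPU container configured as described earlier):

```bash
# Open an interactive shell in the GPU build container instead of running a build script.
CI_DOCKER_EXTRA_PARAMS='-it --rm' CI_COMMAND_PREFIX='' tensorflow/tools/ci_build/ci_build.sh GPU /bin/bash
```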

View File

@@ -14,12 +14,14 @@
# limitations under the License.
# ==============================================================================
- # This script is a wrapper creating inside container the same user as the one
- # running the ci_build.sh outside of container. It also set the home for that
- # user to in the workspace directory.
+ # This script is a wrapper creating the same user inside container as the one
+ # running the ci_build.sh outside the container. It also set the home directory
+ # for the user inside container to match the same absolute path as the workspace
+ # outside of continer.
# We do this so that the bazel running inside container generate symbolic links
# and user permissions which makes sense outside of container.
- # Do not run this outside of docker. It does not make sense.
+ # Do not run this manually. It does not make sense. It is intended to be called
+ # by ci_build.sh only.
set -e

View File

@@ -14,6 +14,7 @@
# limitations under the License.
# ==============================================================================
+ # Get the command line arguments.
CONTAINER_TYPE=$( echo "$1" | tr '[:upper:]' '[:lower:]' )
shift 1
@@ -32,6 +33,12 @@ if [ "$#" -lt 1 ] || [[ ! "${CONTAINER_TYPE}" =~ ^(cpu|gpu|android)$ ]]; then
fi
+ # Optional arguments - environment variables. For example:
+ # CI_DOCKER_EXTRA_PARAMS='-it --rm' CI_COMMAND_PREFIX='' tensorflow/tools/ci_build/ci_build.sh CPU /bin/bash
+ CI_DOCKER_EXTRA_PARAMS=("${CI_DOCKER_EXTRA_PARAMS[@]:---rm}")
+ CI_COMMAND_PREFIX=("${CI_COMMAND_PREFIX[@]:-tensorflow/tools/ci_build/builds/with_the_same_user tensorflow/tools/ci_build/builds/configured ${CONTAINER_TYPE}}")
# Figure out the directory where this script is.
SCRIPT_DIR=$( cd ${0%/*} && pwd -P )
@@ -48,10 +55,22 @@ WORKSPACE="${WORKSPACE:-$(upsearch WORKSPACE)}"
BUILD_TAG="${BUILD_TAG:-tf_ci}"
+ # Add extra params for cuda devices and libraries for GPU container.
+ if [ "${CONTAINER_TYPE}" == "gpu" ]; then
+ devices=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
+ libs=$(\ls /usr/lib/x86_64-linux-gnu/libcuda* | xargs -I{} echo '-v {}:{}')
+ GPU_EXTRA_PARAMS="${devices} ${libs}"
+ else
+ GPU_EXTRA_PARAMS=""
+ fi
# Print arguments.
- echo "CONTAINER_TYPE: ${CONTAINER_TYPE}"
- echo "COMMAND: ${COMMAND[@]}"
echo "WORKSAPCE: ${WORKSPACE}"
+ echo "CI_DOCKER_EXTRA_PARAMS: ${CI_DOCKER_EXTRA_PARAMS[@]}"
+ echo "COMMAND: ${COMMAND[@]}"
+ echo "CI_COMMAND_PREFIX: ${CI_COMMAND_PREFIX[@]}"
+ echo "CONTAINER_TYPE: ${CONTAINER_TYPE}"
echo "BUILD_TAG: ${BUILD_TAG}"
echo " (docker container name will be ${BUILD_TAG}.${CONTAINER_TYPE})"
echo ""
@@ -67,7 +86,6 @@ docker build -t ${BUILD_TAG}.${CONTAINER_TYPE} \
echo "Running '${COMMAND[@]}' inside ${BUILD_TAG}.${CONTAINER_TYPE}..."
mkdir -p ${WORKSPACE}/bazel-ci_build-cache
docker run \
- --rm \
-v ${WORKSPACE}/bazel-ci_build-cache:${WORKSPACE}/bazel-ci_build-cache \
-e "CI_BUILD_HOME=${WORKSPACE}/bazel-ci_build-cache" \
-e "CI_BUILD_USER=${USER}" \
@@ -76,9 +94,8 @@ docker run \
-e "CI_BUILD_GID=$(id -g $USER)" \
-v ${WORKSPACE}:/tensorflow \
-w /tensorflow \
- ${CI_BUILD_DOCKER_RUN_EXTRA_PARAMETERS[@]} \
+ ${GPU_EXTRA_PARAMS} \
+ ${CI_DOCKER_EXTRA_PARAMS[@]} \
"${BUILD_TAG}.${CONTAINER_TYPE}" \
- ${CI_BUILD_DOCKER_RUN_COMMAND_PREFIX[@]} \
- "tensorflow/tools/ci_build/builds/with_the_same_user" \
- "tensorflow/tools/ci_build/builds/configured" \
- "${CONTAINER_TYPE}" ${COMMAND[@]}
+ ${CI_COMMAND_PREFIX[@]} \
+ ${COMMAND[@]}