Automated rollback of commit 24787842ad

PiperOrigin-RevId: 211895566
Author: A. Unique TensorFlower
Date: 2018-09-06 17:11:37 -07:00
Committed by: TensorFlower Gardener
Commit: d6f1077614 (parent e001f3ad84)
16 changed files with 206 additions and 30 deletions

View File

@@ -20,6 +20,7 @@ limitations under the License.
 #ifndef TENSORFLOW_COMPILER_AOT_EMBEDDED_PROTOCOL_BUFFERS_H_
 #define TENSORFLOW_COMPILER_AOT_EMBEDDED_PROTOCOL_BUFFERS_H_
 
+#include "absl/strings/string_view.h"
 #include "absl/types/span.h"
 #include "tensorflow/compiler/xla/statusor.h"
 #include "tensorflow/core/platform/protobuf.h"

View File

@@ -35,6 +35,7 @@ limitations under the License.
 #include "tensorflow/core/graph/graph.h"
 #include "tensorflow/core/graph/tensor_id.h"
 #include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
 #include "tensorflow/core/lib/strings/numbers.h"
 #include "tensorflow/core/platform/env.h"
 #include "tensorflow/core/platform/init_main.h"
@@ -92,9 +93,8 @@ Status Main(const MainFlags& flags) {
   // Write output files.
   Env* env = Env::Default();
   const std::vector<char>& obj = compile_result.aot->object_file_data();
-  TF_RETURN_IF_ERROR(
-      WriteStringToFile(env, flags.out_function_object,
-                        absl::string_view(obj.data(), obj.size())));
+  TF_RETURN_IF_ERROR(WriteStringToFile(env, flags.out_function_object,
+                                       StringPiece(obj.data(), obj.size())));
   CodegenOpts codegen_opts;
   codegen_opts.gen_name_to_index = flags.gen_name_to_index;
   codegen_opts.gen_program_shape = flags.gen_program_shape;

View File

@@ -633,7 +633,7 @@ TEST(XlaCompilationTest, IllegalCycle_UsefulErrorMessage) {
   std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
   Scope root = Scope::NewRootScope().ExitOnError();
   {
-    auto BuildNoopNode = [](absl::string_view name, Graph* graph) {
+    auto BuildNoopNode = [](StringPiece name, Graph* graph) {
       NodeDefBuilder builder(name, "NoOp");
       NodeDef def;
       TF_CHECK_OK(builder.Finalize(&def));

View File

@@ -18,6 +18,7 @@ limitations under the License.
 #ifndef TENSORFLOW_COMPILER_JIT_XLA_CLUSTER_UTIL_H_
 #define TENSORFLOW_COMPILER_JIT_XLA_CLUSTER_UTIL_H_
 
+#include "absl/strings/string_view.h"
 #include "absl/types/optional.h"
 #include "tensorflow/compiler/jit/graphcycles/graphcycles.h"
 #include "tensorflow/core/graph/algorithm.h"

View File

@@ -339,11 +339,11 @@ void XlaDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
 }
 
 void XlaDeviceContext::CopyDeviceTensorToCPU(const Tensor* device_tensor,
-                                             absl::string_view tensor_name,
+                                             StringPiece tensor_name,
                                              Device* device, Tensor* cpu_tensor,
                                              StatusCallback done) {
-  manager_.CopyDeviceTensorToCPU(device_tensor, tensor_name, device, cpu_tensor,
-                                 done);
+  manager_.CopyDeviceTensorToCPU(device_tensor, absl::string_view(tensor_name),
+                                 device, cpu_tensor, done);
 }
 
 void XlaDeviceContext::CopyDeviceTensorToDevice(const Tensor& src_tensor,

View File

@@ -25,6 +25,7 @@ limitations under the License.
 #include "tensorflow/core/framework/allocator.h"
 #include "tensorflow/core/framework/device_base.h"
 #include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
 
 namespace tensorflow {
@@ -110,9 +111,12 @@ class XlaDeviceContext : public DeviceContext {
   void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
                              Tensor* device_tensor,
                              StatusCallback done) const override;
+  // TODO(rlahaye): Replace StringPiece with absl::string_view when the
+  // StringPiece->absl::string_view change is rolled forward.
   void CopyDeviceTensorToCPU(const Tensor* device_tensor,
-                             absl::string_view tensor_name, Device* device,
-                             Tensor* cpu_tensor, StatusCallback done) override;
+                             StringPiece tensor_name,  // non-ABSL OK
+                             Device* device, Tensor* cpu_tensor,
+                             StatusCallback done) override;
   void CopyDeviceTensorToDevice(const Tensor& src_tensor, Tensor* dst_tensor,
                                 const StatusCallback& done);
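Note how this pair of hunks works: the header keeps CopyDeviceTensorToCPU on StringPiece (flagged "non-ABSL OK") while the implementation above still hands the name to a manager that takes absl::string_view, bridging the two with an explicit conversion at the call site. A minimal sketch of that interop pattern, using hypothetical names (LegacyPiece, LogTensorName, Forward) rather than the real TensorFlow classes:

#include <cstddef>

#include "absl/strings/string_view.h"

// Stand-in for a legacy string-slice type that only exposes data()/size().
struct LegacyPiece {
  const char* ptr = nullptr;
  std::size_t len = 0;
  const char* data() const { return ptr; }
  std::size_t size() const { return len; }
};

// Callee that has already migrated to absl::string_view.
void LogTensorName(absl::string_view name) { /* ... */ }

// Caller whose signature still uses the legacy type: convert explicitly at
// the boundary, mirroring absl::string_view(tensor_name) and
// StringPiece(name.data(), name.size()) in the hunks of this commit.
void Forward(LegacyPiece tensor_name) {
  LogTensorName(absl::string_view(tensor_name.data(), tensor_name.size()));
}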

View File

@@ -214,6 +214,7 @@ cc_library(
         "//tensorflow/core:protos_all_cc",
         "//tensorflow/core:stream_executor_no_cuda",
         "@com_google_absl//absl/memory",
+        "@com_google_absl//absl/strings",
         "@com_google_absl//absl/types:span",
     ],
     alwayslink = 1,

View File

@@ -15,6 +15,7 @@ limitations under the License.
 #include "tensorflow/compiler/tf2xla/resource_operation_table.h"
 #include "absl/algorithm/container.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
 #include "tensorflow/core/lib/gtl/flatmap.h"
 
 namespace tensorflow {
@@ -30,11 +31,10 @@ namespace tensorflow {
   }
 }
 
-static gtl::FlatMap<absl::string_view, XlaResourceOpInfo>*
-CreateResourceOpInfoMap() {
-  auto* result = new gtl::FlatMap<absl::string_view, XlaResourceOpInfo>;
+static gtl::FlatMap<StringPiece, XlaResourceOpInfo>* CreateResourceOpInfoMap() {
+  auto* result = new gtl::FlatMap<StringPiece, XlaResourceOpInfo>;
 
-  auto add = [&](absl::string_view op, XlaResourceOpKind op_kind,
+  auto add = [&](StringPiece op, XlaResourceOpKind op_kind,
                  XlaResourceKind resource_kind) {
     auto insert_result =
         result->insert({op, XlaResourceOpInfo(op_kind, resource_kind)});
@@ -103,17 +103,17 @@ CreateResourceOpInfoMap() {
   return result;
 }
 
-static const gtl::FlatMap<absl::string_view, XlaResourceOpInfo>&
+static const gtl::FlatMap<StringPiece, XlaResourceOpInfo>&
 GetStaticResourceOpInfoMap() {
-  static gtl::FlatMap<absl::string_view, XlaResourceOpInfo>* op_info_map =
+  static gtl::FlatMap<StringPiece, XlaResourceOpInfo>* op_info_map =
       CreateResourceOpInfoMap();
   return *op_info_map;
 }
 
 const XlaResourceOpInfo* GetResourceOpInfoForOp(absl::string_view op) {
-  const gtl::FlatMap<absl::string_view, XlaResourceOpInfo>& op_infos =
+  const gtl::FlatMap<StringPiece, XlaResourceOpInfo>& op_infos =
       GetStaticResourceOpInfoMap();
-  auto it = op_infos.find(op);
+  auto it = op_infos.find(StringPiece(op.data(), op.length()));
   return it == op_infos.end() ? nullptr : &it->second;
 }
@@ -121,7 +121,7 @@ namespace resource_op_table_internal {
 std::vector<absl::string_view> GetKnownResourceOps() {
   std::vector<absl::string_view> result;
   for (const auto& p : GetStaticResourceOpInfoMap()) {
-    result.push_back(p.first);
+    result.push_back(absl::string_view(p.first));
   }
   absl::c_sort(result);
   return result;

View File

@@ -18,6 +18,7 @@ limitations under the License.
 #include <unordered_map>
 
+#include "absl/strings/string_view.h"
 #include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
 #include "tensorflow/core/framework/graph.pb.h"
 #include "tensorflow/core/framework/kernel_def.pb.h"

View File

@@ -102,7 +102,8 @@ Status XlaOpKernelContext::ConstantInput(int index,
 static xla::StatusOr<int> InputIndex(XlaOpKernelContext* context,
                                      absl::string_view name) {
   int start, stop;
-  TF_RETURN_IF_ERROR(context->op_kernel().InputRange(name, &start, &stop));
+  TF_RETURN_IF_ERROR(context->op_kernel().InputRange(
+      StringPiece(name.data(), name.length()), &start, &stop));
   if (stop != start + 1) {
     return errors::InvalidArgument("OpKernel used list-valued input name '",
                                    name,
@@ -365,7 +366,8 @@ Status XlaOpKernelContext::InputList(absl::string_view name,
                                      std::vector<xla::XlaOp>* handles,
                                      std::vector<TensorShape>* shapes) {
   OpInputList inputs;
-  TF_RETURN_IF_ERROR(context_->input_list(name, &inputs));
+  TF_RETURN_IF_ERROR(
+      context_->input_list(StringPiece(name.data(), name.size()), &inputs));
   handles->clear();
   shapes->clear();
   for (const Tensor& input : inputs) {
@@ -378,7 +380,8 @@ Status XlaOpKernelContext::InputList(absl::string_view name,
 Status XlaOpKernelContext::ConstantInputList(
     absl::string_view name, std::vector<xla::Literal>* outputs) {
   int start, stop;
-  TF_RETURN_IF_ERROR(op_kernel().InputRange(name, &start, &stop));
+  TF_RETURN_IF_ERROR(op_kernel().InputRange(
+      StringPiece(name.data(), name.size()), &start, &stop));
   outputs->resize(stop - start);
   for (int i = start; i < stop; ++i) {
     TF_RETURN_IF_ERROR(ConstantInput(i, &(*outputs)[i]));
@@ -612,7 +615,7 @@ const xla::XlaComputation* XlaOpKernelContext::GetOrCreateMul(
 
 const Tensor& XlaOpKernelContext::GetInputTensorByName(absl::string_view name) {
   const Tensor* tensor;
-  CHECK(context_->input(name, &tensor).ok());
+  CHECK(context_->input(StringPiece(name.data(), name.length()), &tensor).ok());
   return *tensor;
 }

View File

@@ -22,6 +22,7 @@ limitations under the License.
 #include <unordered_map>
 #include <vector>
 
+#include "absl/strings/string_view.h"
 #include "tensorflow/core/common_runtime/device_factory.h"
 #include "tensorflow/core/common_runtime/local_device.h"
 #include "tensorflow/core/framework/device_base.h"

View File

@@ -28,6 +28,7 @@ limitations under the License.
 #include "tensorflow/compiler/xla/types.h"
 #include "tensorflow/compiler/xla/util.h"
 #include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/protobuf.h"
 #include "tensorflow/core/platform/types.h"
@@ -64,7 +65,7 @@ StatusOr<std::unique_ptr<Literal>> PackedLiteralReader::Read(
   absl::Span<const float> field = result->data<float>();
   char* data = absl::bit_cast<char*>(field.data());
   uint64 bytes = elements * sizeof(float);
-  absl::string_view sp;
+  tensorflow::StringPiece sp;
   auto s = file_->Read(offset_, bytes, &sp, data);
   offset_ += sp.size();
   if (!s.ok()) {
@@ -85,7 +86,7 @@ bool PackedLiteralReader::IsExhausted() const {
   // Try to read a single byte from offset_.  If we can't, we've
   // exhausted the data.
   char single_byte[1];
-  absl::string_view sp;
+  tensorflow::StringPiece sp;
   auto s = file_->Read(offset_, sizeof(single_byte), &sp, single_byte);
   return !s.ok();
 }

View File

@@ -56,6 +56,7 @@ tensorflow/core/lib/hash/hash.cc
 tensorflow/core/lib/hash/crc32c.cc
 tensorflow/core/lib/hash/crc32c_accelerate.cc
 tensorflow/core/lib/core/threadpool.cc
+tensorflow/core/lib/core/stringpiece.cc
 tensorflow/core/lib/core/status.cc
 tensorflow/core/lib/core/coding.cc
 tensorflow/core/lib/core/arena.cc

View File

@@ -0,0 +1,54 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/lib/core/stringpiece.h"
+
+#include <algorithm>
+#include <iostream>
+
+namespace tensorflow {
+
+std::ostream& operator<<(std::ostream& o, StringPiece piece) {
+  o.write(piece.data(), piece.size());
+  return o;
+}
+
+size_t StringPiece::find(char c, size_t pos) const {
+  if (pos >= size_) {
+    return npos;
+  }
+  const char* result =
+      reinterpret_cast<const char*>(memchr(data_ + pos, c, size_ - pos));
+  return result != nullptr ? result - data_ : npos;
+}
+
+// Search range is [0..pos] inclusive. If pos == npos, search everything.
+size_t StringPiece::rfind(char c, size_t pos) const {
+  if (size_ == 0) return npos;
+  for (const char* p = data_ + std::min(pos, size_ - 1); p >= data_; p--) {
+    if (*p == c) {
+      return p - data_;
+    }
+  }
+  return npos;
+}
+
+StringPiece StringPiece::substr(size_t pos, size_t n) const {
+  if (pos > size_) pos = size_;
+  if (n > size_ - pos) n = size_ - pos;
+  return StringPiece(data_ + pos, n);
+}
+
+}  // namespace tensorflow

View File

@@ -31,13 +31,124 @@ limitations under the License.
 #include <string.h>
 #include <iosfwd>
 #include <string>
-#include "absl/strings/string_view.h"
+#include <type_traits>
 #include "tensorflow/core/platform/types.h"
 
 namespace tensorflow {
 
-// Deprecated: please use absl::string_view directly.
-using StringPiece = absl::string_view;
+class StringPiece {
+ public:
+  typedef size_t size_type;
+
+  // Create an empty slice.
+  StringPiece() : data_(nullptr), size_(0) {}
+
+  // Create a slice that refers to d[0,n-1].
+  StringPiece(const char* d, size_t n) : data_(d), size_(n) {}
+
+  // Create a slice that refers to the contents of "s"
+  StringPiece(const string& s) : data_(s.data()), size_(s.size()) {}
+
+  // Create a slice that refers to s[0,strlen(s)-1]
+  StringPiece(const char* s) : data_(s), size_(strlen(s)) {}
+
+  // Return a pointer to the beginning of the referenced data
+  const char* data() const { return data_; }
+
+  // Return the length (in bytes) of the referenced data
+  size_t size() const { return size_; }
+
+  // Return true iff the length of the referenced data is zero
+  bool empty() const { return size_ == 0; }
+
+  typedef const char* const_iterator;
+  typedef const char* iterator;
+  iterator begin() const { return data_; }
+  iterator end() const { return data_ + size_; }
+
+  static const size_t npos = size_type(-1);
+
+  // Return the ith byte in the referenced data.
+  // REQUIRES: n < size()
+  char operator[](size_t n) const {
+    assert(n < size());
+    return data_[n];
+  }
+
+  // Drop the first "n" bytes from this slice.
+  void remove_prefix(size_t n) {
+    assert(n <= size());
+    data_ += n;
+    size_ -= n;
+  }
+
+  void remove_suffix(size_t n) {
+    assert(size_ >= n);
+    size_ -= n;
+  }
+
+  size_t find(char c, size_t pos = 0) const;
+  size_t rfind(char c, size_t pos = npos) const;
+
+  StringPiece substr(size_t pos, size_t n = npos) const;
+
+  // Three-way comparison.  Returns value:
+  //   <  0 iff "*this" <  "b",
+  //   == 0 iff "*this" == "b",
+  //   >  0 iff "*this" >  "b"
+  int compare(StringPiece b) const;
+
+  // Converts to various kinds of strings, including `std::basic_string`.
+  template <typename S>
+  explicit operator S() const {
+    static_assert(
+        std::is_same<char, typename S::value_type>::value,
+        "Type mismatch: S must be a string with character type char.");
+    static_assert(
+        std::is_same<std::char_traits<char>, typename S::traits_type>::value,
+        "Type mismatch: S must be a string with traits type "
+        "std::char_traits<char>.");
+    if (!data()) return {};
+    return S(data(), size());
+  }
+
+ private:
+  const char* data_;
+  size_t size_;
+
+  // Intentionally copyable
+};
+
+inline bool operator==(StringPiece x, StringPiece y) {
+  return ((x.size() == y.size()) &&
+          (memcmp(x.data(), y.data(), x.size()) == 0));
+}
+
+inline bool operator!=(StringPiece x, StringPiece y) { return !(x == y); }
+
+inline bool operator<(StringPiece x, StringPiece y) { return x.compare(y) < 0; }
+inline bool operator>(StringPiece x, StringPiece y) { return x.compare(y) > 0; }
+inline bool operator<=(StringPiece x, StringPiece y) {
+  return x.compare(y) <= 0;
+}
+inline bool operator>=(StringPiece x, StringPiece y) {
+  return x.compare(y) >= 0;
+}
+
+inline int StringPiece::compare(StringPiece b) const {
+  const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
+  int r = memcmp(data_, b.data_, min_len);
+  if (r == 0) {
+    if (size_ < b.size_)
+      r = -1;
+    else if (size_ > b.size_)
+      r = +1;
+  }
+  return r;
+}
+
+// allow StringPiece to be logged
+extern std::ostream& operator<<(std::ostream& o, tensorflow::StringPiece piece);
+
 }  // namespace tensorflow
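For reference, the restored class converts outward only explicitly: the template operator S() accepts any string-like type whose value_type is char and whose traits are std::char_traits<char>, which covers std::string as well as absl::string_view (the latter is exactly what the xla_device_context.cc hunk relies on). A small usage sketch, assuming the TensorFlow and Abseil headers are on the include path and stringpiece.cc is linked in; the literal values in the comments are illustrative only:

#include <cstddef>
#include <iostream>
#include <string>

#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/core/stringpiece.h"

int main() {
  tensorflow::StringPiece piece("hello/world", 11);

  // Explicit conversions through the template operator S().
  std::string owned = static_cast<std::string>(piece);
  absl::string_view view = static_cast<absl::string_view>(piece);

  // Helpers declared in the header and defined in stringpiece.cc.
  size_t slash = piece.find('/');                          // 5
  tensorflow::StringPiece base = piece.substr(slash + 1);  // "world"

  std::cout << owned << " " << view << " " << base << std::endl;
  return 0;
}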

View File

@@ -124,9 +124,6 @@ class AlphaNum {
   AlphaNum(const StringPiece &pc) : piece_(pc) {}  // NOLINT(runtime/explicit)
   AlphaNum(const tensorflow::string &str)          // NOLINT(runtime/explicit)
       : piece_(str) {}
-  template <typename A>
-  AlphaNum(const std::basic_string<char, std::char_traits<char>, A> &str)
-      : piece_(str) {}  // NOLINT(runtime/explicit)
 
   StringPiece::size_type size() const { return piece_.size(); }
   const char *data() const { return piece_.data(); }