STT-tensorflow/tensorflow/compiler/aot/codegen_test_h.golden

// Generated by tfcompile, the TensorFlow graph compiler. DO NOT EDIT!
//
// This header was generated via ahead-of-time compilation of a TensorFlow
// graph. An object file corresponding to this header was also generated.
// This header gives access to the functionality in that object file.
//
// clang-format off
#ifndef TFCOMPILE_GENERATED_entry_point_H_ // NOLINT(build/header_guard)
#define TFCOMPILE_GENERATED_entry_point_H_ // NOLINT(build/header_guard)
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "tensorflow/core/platform/types.h"
namespace Eigen { struct ThreadPoolDevice; }
namespace xla { class ExecutableRunOptions; }
// (Implementation detail) Entry point to the function in the object file.
extern "C" void entry_point(
void* result, const ::xla::ExecutableRunOptions* run_options,
const void** args, void** temps, tensorflow::int64* profile_counters);
extern "C" char __tfcompile_foo_bar_MyClass_ProgramShapeProto_protobuf_array_contents[];
namespace foo {
namespace bar {
// MyClass represents a computation previously specified in a
// TensorFlow graph, now compiled into executable code. This extends the generic
// XlaCompiledCpuFunction class with statically type-safe arg and result
// methods. Usage example:
//
// MyClass computation;
// // ...set args using computation.argN methods
// CHECK(computation.Run());
// // ...inspect results using computation.resultN methods
//
// The Run method invokes the actual computation, with inputs read from arg
// buffers, and outputs written to result buffers. Each Run call may also use
// a set of temporary buffers for the computation.
//
// By default each instance of this class manages its own arg, result and temp
// buffers. The AllocMode constructor parameter may be used to modify the
// buffer allocation strategy.
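//
// For example (an illustrative sketch): constructing with
// AllocMode::RESULTS_PROFILES_AND_TEMPS_ONLY means no argument or variable
// buffers are allocated; the caller must then provide every buffer via the
// set_*_data methods (illustrated further below) before calling Run:
//
//   MyClass computation(
//       MyClass::AllocMode::RESULTS_PROFILES_AND_TEMPS_ONLY);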
//
// Under the default allocation strategy, this class is thread-compatible:
// o Calls to non-const methods require exclusive access to the object.
// o Concurrent calls to const methods are OK, provided it is guaranteed that
// no thread calls a non-const method at the same time.
//
// The logical function signature is:
// ((unknown): f32[1,2], (unknown): s64[3,4], (unknown): f32[1], (unknown): f32[1], (unknown): s32[5]) -> (u32[5,6], f32[1], s32[5])
//
// Memory stats:
// arg bytes total: 392
// arg bytes aligned: 576
// temp bytes total: 126
// temp bytes aligned: 512
class MyClass final : public tensorflow::XlaCompiledCpuFunction {
public:
// Number of input arguments for the compiled computation.
static constexpr size_t kNumArgs = 5;
// Number of variables for the compiled computation.
static constexpr size_t kNumVariables = 3;
// Returns the byte size of the argument buffer at the given index; valid
// indices are in [0, kNumArgs).
static const ::tensorflow::int64 ArgSize(::tensorflow::int32 index) {
return BufferInfos()[ArgIndexToBufferIndex()[index]].size();
}
// Returns static data used to create an XlaCompiledCpuFunction.
static const tensorflow::XlaCompiledCpuFunction::StaticData& StaticData() {
static XlaCompiledCpuFunction::StaticData* kStaticData = [](){
XlaCompiledCpuFunction::StaticData* data =
new XlaCompiledCpuFunction::StaticData;
set_static_data_raw_function(data, entry_point);
set_static_data_buffer_infos(data, BufferInfos());
set_static_data_num_buffers(data, kNumBuffers);
set_static_data_arg_index_table(data, ArgIndexToBufferIndex());
set_static_data_num_args(data, kNumArgs);
set_static_data_num_variables(data, kNumVariables);
set_static_data_result_index(data, kResultIndex);
set_static_data_arg_names(data, StaticArgNames());
set_static_data_variable_names(data, StaticVariableNames());
set_static_data_result_names(data, StaticResultNames());
set_static_data_program_shape(data, StaticProgramShape());
set_static_data_hlo_profile_printer_data(
data, StaticHloProfilePrinterData());
return data;
}();
return *kStaticData;
}
MyClass(AllocMode alloc_mode =
AllocMode::ARGS_VARIABLES_RESULTS_PROFILES_AND_TEMPS)
: XlaCompiledCpuFunction(StaticData(), alloc_mode) {}
MyClass(const MyClass&) = delete;
MyClass& operator=(const MyClass&) = delete;
// Arg methods for managing input buffers. Buffers are in row-major order.
// There is a set of methods for each positional argument, with the following
// general form:
//
// void set_argN_data(void* data)
// Sets the buffer of type T for positional argument N. May be called in
// any AllocMode, but must be called before Run to have an effect. In
// AllocMode::RESULTS_PROFILES_AND_TEMPS_ONLY it must be called for each
// positional argument to set the argument buffers.
//
// T* argN_data()
// Returns the buffer of type T for positional argument N.
//
// T& argN(...dim indices...)
// Returns a reference to the value of type T for positional argument N,
// with dim indices specifying which value. No bounds checking is performed
// on dim indices.
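//
// For example (an illustrative sketch): with the default AllocMode the class
// owns the argument buffers, so values may be written in place:
//
//   MyClass computation;
//   computation.arg_myfeed(0, 0) = 1.0f;
//   computation.arg_myfeed(0, 1) = 2.0f;
//
// Alternatively, point the argument at caller-owned storage (`input` here is
// a hypothetical buffer that must remain valid when Run is called):
//
//   float input[1][2] = {{1.0f, 2.0f}};
//   computation.set_arg_myfeed_data(&input);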
void set_arg0_data(const void* data) {
set_arg_data(0, data);
}
float* arg0_data() {
return static_cast<float*>(arg_data(0));
}
float& arg0(size_t dim0, size_t dim1) {
return (*static_cast<float(*)[1][2]>(
arg_data(0)))[dim0][dim1];
}
const float* arg0_data() const {
return static_cast<const float*>(arg_data(0));
}
const float& arg0(size_t dim0, size_t dim1) const {
return (*static_cast<const float(*)[1][2]>(
arg_data(0)))[dim0][dim1];
}
int arg0_size() const {
return 2 * sizeof(float);
}
int arg0_count() const {
return 2;
}
void set_arg_myfeed_data(const void* data) {
set_arg_data(0, data);
}
float* arg_myfeed_data() {
return static_cast<float*>(arg_data(0));
}
float& arg_myfeed(size_t dim0, size_t dim1) {
return (*static_cast<float(*)[1][2]>(
arg_data(0)))[dim0][dim1];
}
const float* arg_myfeed_data() const {
return static_cast<const float*>(arg_data(0));
}
const float& arg_myfeed(size_t dim0, size_t dim1) const {
return (*static_cast<const float(*)[1][2]>(
arg_data(0)))[dim0][dim1];
}
int arg_myfeed_size() const {
return 2 * sizeof(float);
}
int arg_myfeed_count() const {
return 2;
}
void set_arg1_data(const void* data) {
set_arg_data(1, data);
}
tensorflow::int64* arg1_data() {
return static_cast<tensorflow::int64*>(arg_data(1));
}
tensorflow::int64& arg1(size_t dim0, size_t dim1) {
return (*static_cast<tensorflow::int64(*)[3][4]>(
arg_data(1)))[dim0][dim1];
}
const tensorflow::int64* arg1_data() const {
return static_cast<const tensorflow::int64*>(arg_data(1));
}
const tensorflow::int64& arg1(size_t dim0, size_t dim1) const {
return (*static_cast<const tensorflow::int64(*)[3][4]>(
arg_data(1)))[dim0][dim1];
}
int arg1_size() const {
return 12 * sizeof(tensorflow::int64);
}
int arg1_count() const {
return 12;
}
// Result methods for managing output buffers. Buffers are in row-major order.
// Must only be called after a successful Run call. There is a set of methods
// for each positional result, with the following general form:
//
// T* resultN_data()
// Returns the buffer of type T for positional result N.
//
// T& resultN(...dim indices...)
// Returns a reference to the value of type T for positional result N,
// with dim indices specifying which value. No bounds checking is performed
// on dim indices.
//
// Unlike the arg methods, there is no set_resultN_data method. The result
// buffers are managed internally, and may change after each call to Run.
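//
// For example (an illustrative sketch, continuing the argument example above;
// results are only valid after Run succeeds):
//
//   CHECK(computation.Run());
//   tensorflow::uint32 first = computation.result_myfetch(0, 0);
//   const tensorflow::uint32* all = computation.result_myfetch_data();  // 5x6, row-major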
tensorflow::uint32* result0_data() {
return static_cast<tensorflow::uint32*>(result_data(0));
}
tensorflow::uint32& result0(size_t dim0, size_t dim1) {
return (*static_cast<tensorflow::uint32(*)[5][6]>(
result_data(0)))[dim0][dim1];
}
const tensorflow::uint32* result0_data() const {
return static_cast<const tensorflow::uint32*>(result_data(0));
}
const tensorflow::uint32& result0(size_t dim0, size_t dim1) const {
return (*static_cast<const tensorflow::uint32(*)[5][6]>(
result_data(0)))[dim0][dim1];
}
int result0_size() const {
return 30 * sizeof(tensorflow::uint32);
}
int result0_count() const {
return 30;
}
tensorflow::uint32* result_myfetch_data() {
return static_cast<tensorflow::uint32*>(result_data(0));
}
tensorflow::uint32& result_myfetch(size_t dim0, size_t dim1) {
return (*static_cast<tensorflow::uint32(*)[5][6]>(
result_data(0)))[dim0][dim1];
}
const tensorflow::uint32* result_myfetch_data() const {
return static_cast<const tensorflow::uint32*>(result_data(0));
}
const tensorflow::uint32& result_myfetch(size_t dim0, size_t dim1) const {
return (*static_cast<const tensorflow::uint32(*)[5][6]>(
result_data(0)))[dim0][dim1];
}
int result_myfetch_size() const {
return 30 * sizeof(tensorflow::uint32);
}
int result_myfetch_count() const {
return 30;
}
// Methods for managing variable buffers. Buffers are in row-major order.
//
// For read-write variables we generate the following methods:
//
// void set_var_X_data(T* data)
// Sets the buffer for variable X. Must be called before Run if the
// allocation mode is RESULTS_PROFILES_AND_TEMPS_ONLY.
//
// T* var_X_data()
// Returns the buffer of type T for variable X. If the allocation mode is
// RESULTS_PROFILES_AND_TEMPS_ONLY then this buffer is the same as the
// buffer passed to set_var_X_data.
//
// T& var_X(...dim indices...)
// Returns a reference to the value of type T for variable X,
// with dim indices specifying which value. No bounds checking is performed
// on dim indices.
//
// For read-only variables we generate the same set of methods, except that
// `T` is replaced by `const T`. The `const` preserves the constness of the
// buffer passed to `set_var_X_data`; the underlying buffer itself is not
// const (so the const may safely be cast away) unless `set_var_X_data` was
// called with a pointer to constant storage.
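//
// For example (an illustrative sketch, continuing the example above; the
// variable buffers below are hypothetical and must stay alive across calls
// to Run):
//
//   float frozen = 0.5f;                    // myvar_readonly: f32[1]
//   float state = 1.0f;                     // myvar: f32[1], may be updated in place by Run
//   tensorflow::int32 counters[5] = {0};    // myvar2: s32[5], may be updated in place by Run
//   computation.set_var_myvar_readonly_data(&frozen);
//   computation.set_var_myvar_data(&state);
//   computation.set_var_myvar2_data(counters);
//   CHECK(computation.Run());
//   float updated = computation.var_myvar();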
void set_var_myvar_readonly_data(const float* data) {
set_arg_data(2, data);
}
const float* var_myvar_readonly_data() {
return static_cast<const float*>(arg_data(2));
}
const float& var_myvar_readonly() {
return (*static_cast<const float(*)[1]>(
arg_data(2)))[0];
}
const float* var_myvar_readonly_data() const {
return static_cast<const float*>(arg_data(2));
}
const float& var_myvar_readonly() const {
return (*static_cast<const float(*)[1]>(
arg_data(2)))[0];
}
int var_myvar_readonly_size() const {
return 1 * sizeof(float);
}
int var_myvar_readonly_count() const {
return 1;
}
void set_var_myvar_data(float* data) {
set_arg_data(3, data);
}
float* var_myvar_data() {
return static_cast<float*>(arg_data(3));
}
float& var_myvar() {
return (*static_cast<float(*)[1]>(
arg_data(3)))[0];
}
const float* var_myvar_data() const {
return static_cast<const float*>(arg_data(3));
}
const float& var_myvar() const {
return (*static_cast<const float(*)[1]>(
arg_data(3)))[0];
}
int var_myvar_size() const {
return 1 * sizeof(float);
}
int var_myvar_count() const {
return 1;
}
void set_var_myvar2_data(tensorflow::int32* data) {
set_arg_data(4, data);
}
tensorflow::int32* var_myvar2_data() {
return static_cast<tensorflow::int32*>(arg_data(4));
}
tensorflow::int32& var_myvar2(size_t dim0) {
return (*static_cast<tensorflow::int32(*)[5]>(
arg_data(4)))[dim0];
}
const tensorflow::int32* var_myvar2_data() const {
return static_cast<const tensorflow::int32*>(arg_data(4));
}
const tensorflow::int32& var_myvar2(size_t dim0) const {
return (*static_cast<const tensorflow::int32(*)[5]>(
arg_data(4)))[dim0];
}
int var_myvar2_size() const {
return 5 * sizeof(tensorflow::int32);
}
int var_myvar2_count() const {
return 5;
}
private:
// Number of buffers for the compiled computation.
static constexpr size_t kNumBuffers = 12;
static const ::xla::cpu_function_runtime::BufferInfo* BufferInfos() {
static const ::xla::cpu_function_runtime::BufferInfo
kBufferInfos[kNumBuffers] = {
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({34ULL, 0ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({386ULL, 1ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({386ULL, 2ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({386ULL, 3ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({386ULL, 4ULL}),
::xla::cpu_function_runtime::BufferInfo({5ULL, ~0ULL}),
::xla::cpu_function_runtime::BufferInfo({481ULL, ~0ULL})
};
return kBufferInfos;
}
static const ::tensorflow::int32* ArgIndexToBufferIndex() {
static constexpr ::tensorflow::int32 kArgIndexToBufferIndex[kNumArgs] = {
1, 3, 5, 7, 9
};
return kArgIndexToBufferIndex;
}
// The 0-based index of the result tuple in the temporary buffers.
static constexpr size_t kResultIndex = 11;
// Array of names of each positional argument, terminated by nullptr.
static const char** StaticArgNames() {
static const char* kNames[] = {"myfeed", nullptr};
return kNames;
}
// Array of names of each positional variable, terminated by nullptr.
static const char** StaticVariableNames() {
static const char* kNames[] = {"myvar_readonly", "myvar", "myvar2", nullptr};
return kNames;
}
// Array of names of each positional result, terminated by nullptr.
static const char** StaticResultNames() {
static const char* kNames[] = {"myfetch", nullptr};
return kNames;
}
// Shape of the args and results.
static const ::xla::ProgramShapeProto* StaticProgramShape() {
static const ::xla::ProgramShapeProto* kShape = []() {
::xla::ProgramShapeProto* proto = new ::xla::ProgramShapeProto;
proto->ParseFromArray(&__tfcompile_foo_bar_MyClass_ProgramShapeProto_protobuf_array_contents[0], 149);
return proto;
}();
return kShape;
}
// Metadata that can be used to pretty-print profile counters.
static const ::xla::HloProfilePrinterData* StaticHloProfilePrinterData() {
static const ::xla::HloProfilePrinterData* kHloProfilePrinterData =
nullptr;
return kHloProfilePrinterData;
}
};
} // end namespace bar
} // end namespace foo
#endif // TFCOMPILE_GENERATED_entry_point_H_
// clang-format on