Add option for passing in hidden ops to Python code generation as a file.

Change: 131133377
Derek Murray 2016-08-23 20:29:27 -08:00 committed by TensorFlower Gardener
parent 2269152197
commit 08ce1a4d2f
6 changed files with 344 additions and 245 deletions
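
In short, the per-rule hidden = [...] lists in tensorflow/python/BUILD are replaced by a single shared file, ops/hidden_ops.txt, and the op wrapper generator accepts hidden ops either inline or via a file reference. A sketch of the two command-line forms, following the usage comment added to the generator's main() below ("gen_main" is the placeholder name used in that comment, and the trailing flag selects whether shape functions are required):

gen_main OpName[,OpName]* (0 | 1)    # existing form: comma-delimited list of hidden op names
gen_main @FILENAME (0 | 1)           # new form: read hidden op names from FILENAME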

File: tensorflow/python/BUILD

@@ -311,9 +311,7 @@ py_test(
tf_gen_op_wrapper_py(
name = "functional_ops",
out = "ops/gen_functional_ops.py",
hidden = [
"SymbolicGradient",
],
hidden_file = "ops/hidden_ops.txt",
)
py_library(
@@ -513,51 +511,19 @@ py_test(
tf_gen_op_wrapper_py(
name = "array_ops",
hidden = [
"BroadcastGradientArgs",
"ConcatOffset",
"Concat",
"Const",
"EditDistance",
"MirrorPad",
"MirrorPadGrad",
"OneHot",
"Pack",
"Pad",
"Placeholder",
"RefIdentity",
"Split",
"Slice",
"TileGrad", # Exported through array_grad instead of array_ops.
"ZerosLike", # TODO(josh11b): Use this instead of the Python version.
"Unpack",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "candidate_sampling_ops",
hidden = [
"AllCandidateSampler",
"ComputeAccidentalHits",
"FixedUnigramCandidateSampler",
"LearnedUnigramCandidateSampler",
"LogUniformCandidateSampler",
"ThreadUnsafeUnigramCandidateSampler",
"UniformCandidateSampler",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "control_flow_ops",
hidden = [
"Switch",
"Merge",
"RefMerge",
"Exit",
"RefExit",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
deps = [
"//tensorflow/core:control_flow_ops_op_lib",
@@ -567,263 +533,98 @@ tf_gen_op_wrapper_py(
tf_gen_op_wrapper_py(
name = "ctc_ops",
hidden = [
"CTCLoss",
"CTCGreedyDecoder",
"CTCBeamSearchDecoder",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "data_flow_ops",
hidden = [
"Barrier",
"BarrierClose",
"BarrierIncompleteSize",
"BarrierInsertMany",
"BarrierReadySize",
"BarrierTakeMany",
"PriorityQueue",
"FIFOQueue",
"HashTable",
"InitializeTable",
"InitializeTableFromTextFile",
"LookupTableExport",
"LookupTableFind",
"LookupTableImport",
"LookupTableInsert",
"LookupTableSize",
"MutableHashTable",
"MutableHashTableOfTensors",
"Mutex",
"MutexAcquire",
"MutexRelease",
"PaddingFIFOQueue",
"QueueClose",
"QueueDequeue",
"QueueDequeueMany",
"QueueDequeueUpTo",
"QueueEnqueue",
"QueueEnqueueMany",
"QueueSize",
"RandomShuffleQueue",
"Stack",
"StackPop",
"StackPush",
"StackClose",
"TensorArray",
"TensorArrayClose",
"TensorArrayConcat",
"TensorArrayGrad",
"TensorArrayRead",
"TensorArrayPack",
"TensorArraySize",
"TensorArraySplit",
"TensorArrayUnpack",
"TensorArrayWrite",
"GetSessionHandle",
"GetSessionTensor",
"DeleteSessionTensor",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "image_ops",
hidden = [
"RandomCrop",
"ResizeBilinearGrad",
"ResizeNearestNeighborGrad",
"AdjustContrastv2",
"ScaleImageGrad",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "io_ops",
hidden = [
"FixedLengthRecordReader",
"IdentityReader",
"ReaderClose",
"ReaderEnqueueWork",
"ReaderNumRecordsProduced",
"ReaderNumWorkUnitsCompleted",
"ReaderRead",
"ReaderReadUpTo",
"ReaderReset",
"ReaderRestoreState",
"ReaderSerializeState",
"ReaderWorkQueueLength",
"Restore",
"RestoreSlice",
"Save",
"SaveSlices",
"ShardedFilename",
"ShardedFilespec",
"TextLineReader",
"TFRecordReader",
"WholeFileReader",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "linalg_ops",
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "logging_ops",
hidden = [
"Assert",
"AudioSummary",
"HistogramAccumulatorSummary",
"HistogramSummary",
"ImageSummary",
"MergeSummary",
"Print",
"ScalarSummary",
"TensorSummary",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "math_ops",
hidden = [
"Abs",
"AddN",
"All",
"Any",
"BatchMatMul",
"Complex",
"Max",
"Mean",
"Min",
"Pow",
"Prod",
"Range",
"SparseMatMul",
"Sum",
"MatMul",
"Sigmoid",
"Tanh",
"SigmoidGrad",
"TanhGrad",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "nn_ops",
hidden = [
"AvgPoolGrad", # "*Grad" accessible through nn_grad instead of nn_ops.
"BatchNormWithGlobalNormalization",
"BatchNormWithGlobalNormalizationGrad",
"SoftmaxCrossEntropyWithLogits",
"SparseSoftmaxCrossEntropyWithLogits",
"LRNGrad",
"MaxPoolGrad",
"MaxPoolGradWithArgmax",
"ReluGrad",
"Relu6Grad",
"EluGrad",
"SoftplusGrad",
"SoftsignGrad",
"TopK",
"TopKV2",
"BiasAdd",
"BiasAddV1",
"Relu6",
"AvgPool",
"MaxPool",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "parsing_ops",
hidden = [
"ParseExample",
"ParseSingleSequenceExample",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "random_ops",
hidden = [
"RandomGamma",
"RandomUniform",
"RandomUniformInt",
"RandomShuffle",
"RandomStandardNormal",
"ParameterizedTruncatedNormal",
"TruncatedNormal",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "script_ops",
hidden = [
"PyFunc",
"PyFuncStateless",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "state_ops",
hidden = [
"Variable",
"TemporaryVariable",
"DestroyTemporaryVariable",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "sparse_ops",
hidden = [
"DeserializeManySparse",
"SerializeManySparse",
"SerializeSparse",
"SparseAdd",
"SparseAddGrad",
"SparseConcat",
"SparseSplit",
"SparseSelectLastK",
"SparseReorder",
"SparseReshape",
"SparseToDense",
"SparseTensorDenseAdd",
"SparseTensorDenseMatMul",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "string_ops",
hidden = [
"StringSplit",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)
tf_gen_op_wrapper_py(
name = "user_ops",
hidden = [
"Fact",
],
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = False,
)
tf_gen_op_wrapper_py(
name = "training_ops",
out = "training/gen_training_ops.py",
hidden_file = "ops/hidden_ops.txt",
require_shape_functions = True,
)

File: tensorflow/python/framework/python_op_gen.cc

@@ -646,7 +646,7 @@ void GenerateLowerCaseOpName(const string& str, string* result) {
} // namespace
string GetPythonOps(const OpList& ops, const string& hidden_ops,
string GetPythonOps(const OpList& ops, const std::vector<string>& hidden_ops,
bool require_shapes) {
string result;
// Header
@@ -668,15 +668,13 @@ from tensorflow.python.framework import op_def_library
)");
std::vector<string> hidden_vec = str_util::Split(hidden_ops, ',');
// We'll make a copy of ops that filters out descriptions.
OpList cleaned_ops;
auto out = cleaned_ops.mutable_op();
out->Reserve(ops.op_size());
for (const auto& op_def : ops.op()) {
bool is_hidden = false;
for (const string& hidden : hidden_vec) {
for (const string& hidden : hidden_ops) {
if (op_def.name() == hidden) {
is_hidden = true;
break;
@@ -726,22 +724,16 @@ _op_def_lib = _InitOpDefLibrary()
return result;
}
void PrintPythonOps(const OpList& ops, const string& hidden_ops,
void PrintPythonOps(const OpList& ops, const std::vector<string>& hidden_ops,
bool require_shapes) {
printf("%s", GetPythonOps(ops, hidden_ops, require_shapes).c_str());
}
string GetAllPythonOps(const char* hidden, bool require_shapes) {
OpList ops;
OpRegistry::Global()->Export(false, &ops);
return GetPythonOps(ops, hidden, require_shapes);
}
string GetPythonWrappers(const char* op_wrapper_buf, size_t op_wrapper_len) {
string op_list_str(op_wrapper_buf, op_wrapper_len);
OpList ops;
ops.ParseFromString(op_list_str);
return GetPythonOps(ops, "", false);
return GetPythonOps(ops, {}, false);
}
} // namespace tensorflow
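
A minimal sketch of a caller using the updated GetPythonOps() signature (the caller itself is hypothetical; the Export() call mirrors GetAllPythonOps() in this file):

#include <string>
#include <vector>

#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/python/framework/python_op_gen.h"

// Generates Python wrapper source for all registered ops, hiding two ops by name.
std::string GenerateWrappers() {
  tensorflow::OpList ops;
  tensorflow::OpRegistry::Global()->Export(false, &ops);  // false: exclude internal ops
  std::vector<tensorflow::string> hidden_ops = {"Pack", "Unpack"};
  return tensorflow::GetPythonOps(ops, hidden_ops, /*require_shapes=*/true);
}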

File: tensorflow/python/framework/python_op_gen.h

@@ -17,6 +17,7 @@ limitations under the License.
#define TENSORFLOW_PYTHON_FRAMEWORK_PYTHON_OP_GEN_H_
#include <string>
#include <vector>
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/platform/types.h"
@@ -26,9 +27,9 @@ namespace tensorflow {
// list of Op names that should get a leading _ in the output.
// The Print* version prints the output to stdout, Get* version returns the
// output as a string.
void PrintPythonOps(const OpList& ops, const string& hidden_ops,
void PrintPythonOps(const OpList& ops, const std::vector<string>& hidden_ops,
bool require_shapes);
string GetPythonOps(const OpList& ops, const string& hidden_ops,
string GetPythonOps(const OpList& ops, const std::vector<string>& hidden_ops,
bool require_shapes);
// Get the python wrappers for a list of ops in a OpList.

File: tensorflow/python/framework/python_op_gen_main.cc

@@ -15,18 +15,73 @@ limitations under the License.
#include "tensorflow/python/framework/python_op_gen.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
void PrintAllPythonOps(const char* hidden, bool require_shapes) {
Status ReadHiddenOpsFromFile(const string& filename,
std::vector<string>* hidden_ops) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(Env::Default()->NewRandomAccessFile(filename, &file));
std::unique_ptr<io::InputBuffer> input_buffer(
new io::InputBuffer(file.get(), 256 << 10));
string line_contents;
Status s = input_buffer->ReadLine(&line_contents);
while (s.ok()) {
// The parser assumes that the op name is the first string on each
// line with no preceding whitespace, and ignores lines that do
// not start with an op name as a comment.
strings::Scanner scanner{StringPiece(line_contents)};
StringPiece op_name;
if (scanner.One(strings::Scanner::LETTER_DIGIT_DOT)
.Any(strings::Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE)
.GetResult(nullptr, &op_name)) {
hidden_ops->emplace_back(op_name.ToString());
}
s = input_buffer->ReadLine(&line_contents);
}
if (!errors::IsOutOfRange(s)) return s;
return Status::OK();
}
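
To make the accepted file format concrete, here is how ReadHiddenOpsFromFile() treats a few representative lines (op names taken from the ops/hidden_ops.txt added in this commit):

# data_flow_ops                                   -> no leading op name; skipped as a comment
Barrier                                           -> hides the op "Barrier"
TileGrad # Exported through array_grad instead.   -> hides "TileGrad"; trailing text is ignored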
// The argument parsing is deliberately simplistic to support our only
// known use cases:
//
// 1. Read all op names from a file.
// 2. Read all op names from the arg as a comma-delimited list.
//
// Expected command-line argument syntax:
// ARG ::= '@' FILENAME
// | OP_NAME [',' OP_NAME]*
Status ParseHiddenOpsCommandLine(const char* arg,
std::vector<string>* hidden_ops) {
std::vector<string> op_names = str_util::Split(arg, ',');
if (op_names.size() == 1 && op_names[0].substr(0, 1) == "@") {
const string filename = op_names[0].substr(1);
return tensorflow::ReadHiddenOpsFromFile(filename, hidden_ops);
} else {
*hidden_ops = std::move(op_names);
}
return Status::OK();
}
void PrintAllPythonOps(const std::vector<string>& hidden_ops,
bool require_shapes) {
OpList ops;
OpRegistry::Global()->Export(false, &ops);
PrintPythonOps(ops, hidden, require_shapes);
PrintPythonOps(ops, hidden_ops, require_shapes);
}
} // namespace
@@ -34,10 +89,15 @@ void PrintAllPythonOps(const char* hidden, bool require_shapes) {
int main(int argc, char* argv[]) {
tensorflow::port::InitMain(argv[0], &argc, &argv);
// Usage:
// gen_main [ @FILENAME | OpName[,OpName]* ] (0 | 1)
if (argc == 2) {
tensorflow::PrintAllPythonOps("", std::string(argv[1]) == "1");
tensorflow::PrintAllPythonOps({}, tensorflow::string(argv[1]) == "1");
} else if (argc == 3) {
tensorflow::PrintAllPythonOps(argv[1], std::string(argv[2]) == "1");
std::vector<tensorflow::string> hidden_ops;
TF_CHECK_OK(tensorflow::ParseHiddenOpsCommandLine(argv[1], &hidden_ops));
tensorflow::PrintAllPythonOps(hidden_ops,
tensorflow::string(argv[2]) == "1");
} else {
return -1;
}
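
A usage sketch of the resulting command line (the tool and output names follow the tf_gen_op_wrapper_py conventions for the array_ops rule above; paths are illustrative):

gen_array_ops_py_wrappers_cc @ops/hidden_ops.txt 1 > ops/gen_array_ops.py   # hidden ops read from a file, shape functions required
gen_array_ops_py_wrappers_cc Pack,Unpack,Const 1 > ops/gen_array_ops.py     # hidden ops as a comma-delimited list
gen_array_ops_py_wrappers_cc 1 > ops/gen_array_ops.py                       # no hidden ops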

File: tensorflow/python/ops/hidden_ops.txt (new file)

@@ -0,0 +1,224 @@
# array_ops
BroadcastGradientArgs
ConcatOffset
Concat
Const
EditDistance
MirrorPad
MirrorPadGrad
OneHot
Pack
Pad
Placeholder
RefIdentity
Split
Slice
TileGrad # Exported through array_grad instead of array_ops.
ZerosLike # TODO(josh11b): Use this instead of the Python version.
Unpack
# candidate_sampling_ops
AllCandidateSampler
ComputeAccidentalHits
FixedUnigramCandidateSampler
LearnedUnigramCandidateSampler
LogUniformCandidateSampler
ThreadUnsafeUnigramCandidateSampler
UniformCandidateSampler
# control_flow_ops
Switch
Merge
RefMerge
Exit
RefExit
# ctc_ops
CTCLoss
CTCGreedyDecoder
CTCBeamSearchDecoder
# data_flow_ops
Barrier
BarrierClose
BarrierIncompleteSize
BarrierInsertMany
BarrierReadySize
BarrierTakeMany
PriorityQueue
FIFOQueue
HashTable
InitializeTable
InitializeTableFromTextFile
LookupTableExport
LookupTableFind
LookupTableImport
LookupTableInsert
LookupTableSize
MutableHashTable
MutableHashTableOfTensors
Mutex
MutexAcquire
MutexRelease
PaddingFIFOQueue
QueueClose
QueueDequeue
QueueDequeueMany
QueueDequeueUpTo
QueueEnqueue
QueueEnqueueMany
QueueSize
RandomShuffleQueue
Stack
StackPop
StackPush
StackClose
TensorArray
TensorArrayClose
TensorArrayConcat
TensorArrayGrad
TensorArrayRead
TensorArrayPack
TensorArraySize
TensorArraySplit
TensorArrayUnpack
TensorArrayWrite
GetSessionHandle
GetSessionTensor
DeleteSessionTensor
# functional_ops
SymbolicGradient
# image_ops
RandomCrop
ResizeBilinearGrad
ResizeNearestNeighborGrad
AdjustContrastv2
ScaleImageGrad
# io_ops
FixedLengthRecordReader
IdentityReader
ReaderClose
ReaderEnqueueWork
ReaderNumRecordsProduced
ReaderNumWorkUnitsCompleted
ReaderRead
ReaderReadUpTo
ReaderReset
ReaderRestoreState
ReaderSerializeState
ReaderWorkQueueLength
Restore
RestoreSlice
Save
SaveSlices
ShardedFilename
ShardedFilespec
TextLineReader
TFRecordReader
WholeFileReader
# linalg_ops
# (None)
# logging_ops
Assert
AudioSummary
HistogramAccumulatorSummary
HistogramSummary
ImageSummary
MergeSummary
Print
ScalarSummary
TensorSummary
# math_ops
Abs
AddN
All
Any
BatchMatMul
Complex
Max
Mean
Min
Pow
Prod
Range
SparseMatMul
Sum
MatMul
Sigmoid
Tanh
SigmoidGrad
TanhGrad
# nn_ops
AvgPoolGrad # "*Grad" accessible through nn_grad instead of nn_ops.
BatchNormWithGlobalNormalization
BatchNormWithGlobalNormalizationGrad
SoftmaxCrossEntropyWithLogits
SparseSoftmaxCrossEntropyWithLogits
LRNGrad
MaxPoolGrad
MaxPoolGradWithArgmax
ReluGrad
Relu6Grad
EluGrad
SoftplusGrad
SoftsignGrad
TopK
TopKV2
BiasAdd
BiasAddV1
Relu6
AvgPool
MaxPool
# parsing_ops
ParseExample
ParseSingleSequenceExample
# random_ops
RandomGamma
RandomUniform
RandomUniformInt
RandomShuffle
RandomStandardNormal
ParameterizedTruncatedNormal
TruncatedNormal
# script_ops
PyFunc
PyFuncStateless
# state_ops
Variable
TemporaryVariable
DestroyTemporaryVariable
# sparse_ops
DeserializeManySparse
SerializeManySparse
SerializeSparse
SparseAdd
SparseAddGrad
SparseConcat
SparseSplit
SparseSelectLastK
SparseReorder
SparseReshape
SparseToDense
SparseTensorDenseAdd
SparseTensorDenseMatMul
# string_ops
StringSplit
# user_ops
Fact
# training_ops
# (None)

File: tensorflow/tensorflow.bzl

@@ -216,8 +216,8 @@ def tf_gen_op_wrappers_cc(name,
alwayslink=1,)
# Invoke this rule in .../tensorflow/python to build the wrapper library.
def tf_gen_op_wrapper_py(name, out=None, hidden=[], visibility=None, deps=[],
require_shape_functions=False):
def tf_gen_op_wrapper_py(name, out=None, hidden=None, visibility=None, deps=[],
require_shape_functions=False, hidden_file=None):
# Construct a cc_binary containing the specified ops.
tool_name = "gen_" + name + "_py_wrappers_cc"
if not deps:
@@ -236,12 +236,33 @@ def tf_gen_op_wrapper_py(name, out=None, hidden=[], visibility=None, deps=[],
if not out:
out = "ops/gen_" + name + ".py"
native.genrule(
name=name + "_pygenrule",
outs=[out],
tools=[tool_name],
cmd=("$(location " + tool_name + ") " + ",".join(hidden)
+ " " + ("1" if require_shape_functions else "0") + " > $@"))
if hidden:
# `hidden` is a list of op names to be hidden in the generated module.
native.genrule(
name=name + "_pygenrule",
outs=[out],
tools=[tool_name],
cmd=("$(location " + tool_name + ") " + ",".join(hidden)
+ " " + ("1" if require_shape_functions else "0") + " > $@"))
elif hidden_file:
# `hidden_file` is a file containing a list of op names to be hidden in the
# generated module.
native.genrule(
name=name + "_pygenrule",
outs=[out],
srcs=[hidden_file],
tools=[tool_name],
cmd=("$(location " + tool_name + ") @$(location "
+ hidden_file + ") " + ("1" if require_shape_functions else "0")
+ " > $@"))
else:
# No ops should be hidden in the generated module.
native.genrule(
name=name + "_pygenrule",
outs=[out],
tools=[tool_name],
cmd=("$(location " + tool_name + ") "
+ ("1" if require_shape_functions else "0") + " > $@"))
# Make a py_library out of the generated python file.
native.py_library(name=name,
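
For reference, a typical invocation of the updated macro (taken from the tensorflow/python/BUILD changes above) sets hidden_file instead of hidden; for this rule the hidden_file branch above would expand the genrule command to roughly "$(location gen_sparse_ops_py_wrappers_cc) @$(location ops/hidden_ops.txt) 1 > $@":

tf_gen_op_wrapper_py(
    name = "sparse_ops",
    hidden_file = "ops/hidden_ops.txt",
    require_shape_functions = True,
)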