Adding tf_export decorators/calls to TensorFlow functions and constants.

PiperOrigin-RevId: 184020524
Authored by Anna R on 2018-01-31 11:25:38 -08:00; committed by Michael Case.
parent acb2d61474
commit adc9ee7150
23 changed files with 135 additions and 0 deletions
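Every file below follows the same two patterns: public classes and functions gain a @tf_export(...) decorator registering them under the given name in the generated tf.* API, and module-level constants, which cannot be decorated, are registered through an explicit export_constant call. A minimal sketch of both patterns, using a hypothetical module and hypothetical names:

from tensorflow.python.util.tf_export import tf_export

# Hypothetical function; the decorator registers it for export as
# tf.my_function in the generated API. A symbol may also be exported
# under several names at once, as @tf_export('compat.as_bytes',
# 'compat.as_str') does in util/compat.py below.
@tf_export('my_function')
def my_function(x):
  return x

# Hypothetical constant; constants cannot carry a decorator, so they
# are registered explicitly against their defining module.
MY_CONSTANT = 42
tf_export('my_constant').export_constant(__name__, 'MY_CONSTANT')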

tensorflow/python/client/session.py

@@ -35,6 +35,7 @@ from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class SessionInterface(object):
@@ -1441,6 +1442,7 @@ class BaseSession(SessionInterface):
return handles
@tf_export('Session')
class Session(BaseSession):
"""A class for running TensorFlow operations.
@@ -1581,6 +1583,7 @@ class Session(BaseSession):
tf_session.TF_Reset(target, containers, config)
@tf_export('InteractiveSession')
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.

tensorflow/python/data/ops/dataset_ops.py

@@ -41,8 +41,10 @@ from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.Dataset")
class Dataset(object):
"""Represents a potentially large set of elements.

tensorflow/python/data/ops/iterator_ops.py

@@ -25,6 +25,7 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util.tf_export import tf_export
# NOTE(mrry): It is legitimate to call `Iterator.get_next()` multiple
@@ -47,6 +48,7 @@ GET_NEXT_CALL_WARNING_MESSAGE = (
"`next_element` inside the loop.")
@tf_export("data.Iterator")
class Iterator(object):
"""Represents the state of iterating through a `Dataset`."""

tensorflow/python/data/ops/readers.py

@@ -23,12 +23,14 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util.tf_export import tf_export
# TODO(b/64974358): Increase default buffer size to 256 MB.
_DEFAULT_READER_BUFFER_SIZE_BYTES = 256 * 1024 # 256 KB
@tf_export("data.TextLineDataset")
class TextLineDataset(Dataset):
"""A `Dataset` comprising lines from one or more text files."""
@@ -71,6 +73,7 @@ class TextLineDataset(Dataset):
return dtypes.string
@tf_export("data.TFRecordDataset")
class TFRecordDataset(Dataset):
"""A `Dataset` comprising records from one or more TFRecord files."""
@@ -115,6 +118,7 @@ class TFRecordDataset(Dataset):
return dtypes.string
@tf_export("data.FixedLengthRecordDataset")
class FixedLengthRecordDataset(Dataset):
"""A `Dataset` of fixed-length records from one or more binary files."""

tensorflow/python/estimator/export/export.py

@@ -36,12 +36,14 @@ from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
_SINGLE_FEATURE_DEFAULT_NAME = 'feature'
_SINGLE_RECEIVER_DEFAULT_NAME = 'input'
@tf_export('estimator.export.ServingInputReceiver')
class ServingInputReceiver(collections.namedtuple(
'ServingInputReceiver',
['features', 'receiver_tensors', 'receiver_tensors_alternatives'])):
@@ -118,6 +120,7 @@ class ServingInputReceiver(collections.namedtuple(
receiver_tensors_alternatives=receiver_tensors_alternatives)
@tf_export('estimator.export.build_parsing_serving_input_receiver_fn')
def build_parsing_serving_input_receiver_fn(feature_spec,
default_batch_size=None):
"""Build a serving_input_receiver_fn expecting fed tf.Examples.
@@ -146,6 +149,7 @@ def build_parsing_serving_input_receiver_fn(feature_spec,
return serving_input_receiver_fn
@tf_export('estimator.export.build_raw_serving_input_receiver_fn')
def build_raw_serving_input_receiver_fn(features, default_batch_size=None):
"""Build a serving_input_receiver_fn expecting feature Tensors.

tensorflow/python/estimator/export/export_output.py

@@ -26,8 +26,10 @@ import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util.tf_export import tf_export
@tf_export('estimator.export.ExportOutput')
class ExportOutput(object):
"""Represents an output of a model that can be served.
@@ -50,6 +52,7 @@ class ExportOutput(object):
pass
@tf_export('estimator.export.ClassificationOutput')
class ClassificationOutput(ExportOutput):
"""Represents the output of a classification head.
@@ -118,6 +121,7 @@ class ClassificationOutput(ExportOutput):
examples, self.classes, self.scores)
@tf_export('estimator.export.RegressionOutput')
class RegressionOutput(ExportOutput):
"""Represents the output of a regression head."""
@@ -153,6 +157,7 @@ class RegressionOutput(ExportOutput):
_SINGLE_OUTPUT_DEFAULT_NAME = 'output'
@tf_export('estimator.export.PredictOutput')
class PredictOutput(ExportOutput):
"""Represents the output of a generic prediction head.

tensorflow/python/estimator/inputs/numpy_io.py

@@ -24,6 +24,7 @@ import numpy as np
from six import string_types
from tensorflow.python.estimator.inputs.queues import feeding_functions
from tensorflow.python.util.tf_export import tf_export
# Key name to pack the target into dict of `features`. See
# `_get_unique_target_key` for details.
@@ -86,6 +87,7 @@ def _validate_and_convert_features(x):
return ordered_dict_data
@tf_export('estimator.inputs.numpy_input_fn')
def numpy_input_fn(x,
y=None,
batch_size=128,

tensorflow/python/estimator/inputs/pandas_io.py

@@ -21,6 +21,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
from tensorflow.python.util.tf_export import tf_export
try:
# pylint: disable=g-import-not-at-top
@@ -34,6 +35,7 @@ except ImportError:
HAS_PANDAS = False
@tf_export('estimator.inputs.pandas_input_fn')
def pandas_input_fn(x,
y=None,
batch_size=128,

tensorflow/python/feature_column/feature_column.py

@@ -157,6 +157,7 @@ from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def _internal_input_layer(features,
@@ -209,6 +210,7 @@ def _internal_input_layer(features,
return array_ops.concat(output_tensors, 1)
@tf_export('feature_column.input_layer')
def input_layer(features,
feature_columns,
weight_collections=None,
@@ -329,6 +331,7 @@ class InputLayer(object):
return self._input_layer_template.weights
@tf_export('feature_column.linear_model')
def linear_model(features,
feature_columns,
units=1,
@@ -498,6 +501,7 @@ def _transform_features(features, feature_columns):
return outputs
@tf_export('feature_column.make_parse_example_spec')
def make_parse_example_spec(feature_columns):
"""Creates parsing spec dictionary from input feature_columns.
@@ -557,6 +561,7 @@ def make_parse_example_spec(feature_columns):
return result
@tf_export('feature_column.embedding_column')
def embedding_column(
categorical_column, dimension, combiner='mean', initializer=None,
ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None,
@@ -807,6 +812,7 @@ def shared_embedding_columns(
return result
@tf_export('feature_column.numeric_column')
def numeric_column(key,
shape=(1,),
default_value=None,
@@ -881,6 +887,7 @@ def numeric_column(key,
normalizer_fn=normalizer_fn)
@tf_export('feature_column.bucketized_column')
def bucketized_column(source_column, boundaries):
"""Represents discretized dense input.
@@ -970,6 +977,7 @@ def _assert_string_or_int(dtype, prefix):
'{} dtype must be string or integer. dtype: {}.'.format(prefix, dtype))
@tf_export('feature_column.categorical_column_with_hash_bucket')
def categorical_column_with_hash_bucket(key,
hash_bucket_size,
dtype=dtypes.string):
@@ -1026,6 +1034,7 @@ def categorical_column_with_hash_bucket(key,
return _HashedCategoricalColumn(key, hash_bucket_size, dtype)
@tf_export('feature_column.categorical_column_with_vocabulary_file')
def categorical_column_with_vocabulary_file(key,
vocabulary_file,
vocabulary_size=None,
@@ -1145,6 +1154,7 @@ def categorical_column_with_vocabulary_file(key,
dtype=dtype)
@tf_export('feature_column.categorical_column_with_vocabulary_list')
def categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A `_CategoricalColumn` with in-memory vocabulary.
@@ -1255,6 +1265,7 @@ def categorical_column_with_vocabulary_list(
default_value=default_value, num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_identity')
def categorical_column_with_identity(key, num_buckets, default_value=None):
"""A `_CategoricalColumn` that returns identity values.
@@ -1322,6 +1333,7 @@ def categorical_column_with_identity(key, num_buckets, default_value=None):
key=key, num_buckets=num_buckets, default_value=default_value)
@tf_export('feature_column.indicator_column')
def indicator_column(categorical_column):
"""Represents multi-hot representation of given categorical column.
@@ -1350,6 +1362,7 @@ def indicator_column(categorical_column):
return _IndicatorColumn(categorical_column)
@tf_export('feature_column.weighted_categorical_column')
def weighted_categorical_column(
categorical_column, weight_feature_key, dtype=dtypes.float32):
"""Applies weight values to a `_CategoricalColumn`.
@@ -1424,6 +1437,7 @@ def weighted_categorical_column(
dtype=dtype)
@tf_export('feature_column.crossed_column')
def crossed_column(keys, hash_bucket_size, hash_key=None):
"""Returns a column for performing crosses of categorical features.

tensorflow/python/lib/io/file_io.py

@@ -31,6 +31,7 @@ from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
class FileIO(object):
@@ -235,6 +236,7 @@ class FileIO(object):
self._writable_file = None
@tf_export("gfile.Exists")
def file_exists(filename):
"""Determines whether a path exists or not.
@@ -256,6 +258,7 @@ def file_exists(filename):
return True
@tf_export("gfile.Remove")
def delete_file(filename):
"""Deletes the file located at 'filename'.
@@ -306,6 +309,7 @@ def write_string_to_file(filename, file_content):
f.write(file_content)
@tf_export("gfile.Glob")
def get_matching_files(filename):
"""Returns a list of files that match the given pattern(s).
@@ -336,6 +340,7 @@ def get_matching_files(filename):
]
@tf_export("gfile.MkDir")
def create_dir(dirname):
"""Creates a directory with the name 'dirname'.
@@ -353,6 +358,7 @@ def create_dir(dirname):
pywrap_tensorflow.CreateDir(compat.as_bytes(dirname), status)
@tf_export("gfile.MakeDirs")
def recursive_create_dir(dirname):
"""Creates a directory and all parent/intermediate directories.
@@ -368,6 +374,7 @@ def recursive_create_dir(dirname):
pywrap_tensorflow.RecursivelyCreateDir(compat.as_bytes(dirname), status)
@tf_export("gfile.Copy")
def copy(oldpath, newpath, overwrite=False):
"""Copies data from oldpath to newpath.
@@ -385,6 +392,7 @@ def copy(oldpath, newpath, overwrite=False):
compat.as_bytes(oldpath), compat.as_bytes(newpath), overwrite, status)
@tf_export("gfile.Rename")
def rename(oldname, newname, overwrite=False):
"""Rename or move a file / directory.
@@ -426,6 +434,7 @@ def atomic_write_string_to_file(filename, contents, overwrite=True):
raise
@tf_export("gfile.DeleteRecursively")
def delete_recursively(dirname):
"""Deletes everything under dirname recursively.
@@ -439,6 +448,7 @@ def delete_recursively(dirname):
pywrap_tensorflow.DeleteRecursively(compat.as_bytes(dirname), status)
@tf_export("gfile.IsDirectory")
def is_directory(dirname):
"""Returns whether the path is a directory or not.
@@ -452,6 +462,7 @@ def is_directory(dirname):
return pywrap_tensorflow.IsDirectory(compat.as_bytes(dirname), status)
@tf_export("gfile.ListDirectory")
def list_directory(dirname):
"""Returns a list of entries contained within a directory.
@@ -479,6 +490,7 @@ def list_directory(dirname):
]
@tf_export("gfile.Walk")
def walk(top, in_order=True):
"""Recursive directory tree generator for directories.
@@ -522,6 +534,7 @@ def walk(top, in_order=True):
yield here
@tf_export("gfile.Stat")
def stat(filename):
"""Returns file statistics for a given path.

tensorflow/python/lib/io/tf_record.py

@@ -22,8 +22,10 @@ from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
@tf_export("python_io.TFRecordCompressionType")
class TFRecordCompressionType(object):
"""The type of compression for the record."""
NONE = 0
@@ -33,6 +35,7 @@ class TFRecordCompressionType(object):
# NOTE(vrv): This will eventually be converted into a proto. to match
# the interface used by the C++ RecordWriter.
@tf_export("python_io.TFRecordOptions")
class TFRecordOptions(object):
"""Options used for manipulating TFRecord files."""
compression_type_map = {
@@ -51,6 +54,7 @@ class TFRecordOptions(object):
return cls.compression_type_map[options.compression_type]
@tf_export("python_io.tf_record_iterator")
def tf_record_iterator(path, options=None):
"""An iterator that read the records from a TFRecords file.
@@ -81,6 +85,7 @@ def tf_record_iterator(path, options=None):
reader.Close()
@tf_export("python_io.TFRecordWriter")
class TFRecordWriter(object):
"""A class to write records to a TFRecords file.

tensorflow/python/ops/losses/losses_impl.py

@@ -28,8 +28,10 @@ from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import util
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export
@tf_export("losses.Reduction")
class Reduction(object):
"""Types of loss reduction.
@@ -152,6 +154,7 @@ def _num_elements(losses):
return array_ops.size(losses, name=scope, out_type=losses.dtype)
@tf_export("losses.compute_weighted_loss")
def compute_weighted_loss(
losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
@@ -211,6 +214,7 @@ def compute_weighted_loss(
return loss
@tf_export("losses.absolute_difference")
def absolute_difference(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
@@ -258,6 +262,7 @@ def absolute_difference(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export("losses.cosine_distance")
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def cosine_distance(
labels, predictions, axis=None, weights=1.0, scope=None,
@@ -311,6 +316,7 @@ def cosine_distance(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export("losses.hinge_loss")
def hinge_loss(labels, logits, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
@@ -352,6 +358,7 @@ def hinge_loss(labels, logits, weights=1.0, scope=None,
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export("losses.huber_loss")
def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
@@ -420,6 +427,7 @@ def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export("losses.log_loss")
def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
@@ -471,6 +479,7 @@ def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
# TODO(b/37208492): Add reduction arg.
@tf_export("losses.mean_pairwise_squared_error")
def mean_pairwise_squared_error(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
@@ -557,6 +566,7 @@ def mean_pairwise_squared_error(
return mean_loss
@tf_export("losses.mean_squared_error")
def mean_squared_error(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
@@ -604,6 +614,7 @@ def mean_squared_error(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export("losses.sigmoid_cross_entropy")
def sigmoid_cross_entropy(
multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
@@ -662,6 +673,7 @@ def sigmoid_cross_entropy(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export("losses.softmax_cross_entropy")
def softmax_cross_entropy(
onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
@@ -771,6 +783,7 @@ def _remove_squeezable_dimensions(
return labels, predictions, weights
@tf_export("losses.sparse_softmax_cross_entropy")
def sparse_softmax_cross_entropy(
labels, logits, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,

tensorflow/python/ops/losses/util.py

@@ -30,8 +30,10 @@ from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("losses.add_loss")
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
"""Adds a externally defined loss to the collection of losses.
@@ -43,6 +45,7 @@ def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
ops.add_to_collection(loss_collection, loss)
@tf_export("losses.get_losses")
def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
"""Gets the list of losses from the loss_collection.
@@ -56,6 +59,7 @@ def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
return ops.get_collection(loss_collection, scope)
@tf_export("losses.get_regularization_losses")
def get_regularization_losses(scope=None):
"""Gets the list of regularization losses.
@@ -68,6 +72,7 @@ def get_regularization_losses(scope=None):
return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
@tf_export("losses.get_regularization_loss")
def get_regularization_loss(scope=None, name="total_regularization_loss"):
"""Gets the total regularization loss.
@@ -85,6 +90,7 @@ def get_regularization_loss(scope=None, name="total_regularization_loss"):
return constant_op.constant(0.0)
@tf_export("losses.get_total_loss")
def get_total_loss(add_regularization_losses=True, name="total_loss"):
"""Returns a tensor whose value represents the total loss.

tensorflow/python/platform/app.py

@@ -23,6 +23,7 @@ import sys as _sys
from tensorflow.python.platform import flags
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export
def _usage(shorthelp):
@@ -108,6 +109,7 @@ def _define_help_flags():
_define_help_flags_called = True
@tf_export('app.run')
def run(main=None, argv=None):
"""Runs the program with an optional 'main' function and 'argv' list."""

tensorflow/python/platform/resource_loader.py

@@ -29,8 +29,10 @@ import sys as _sys
from tensorflow.python.util import tf_inspect as _inspect
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export
@tf_export('resource_loader.load_resource')
def load_resource(path):
"""Load the resource at given path, where path is relative to tensorflow/.
@@ -52,6 +54,7 @@ def load_resource(path):
# pylint: disable=protected-access
@tf_export('resource_loader.get_data_files_path')
def get_data_files_path():
"""Get a direct path to the data files colocated with the script.
@@ -62,6 +65,7 @@ def get_data_files_path():
return _os.path.dirname(_inspect.getfile(_sys._getframe(1)))
@tf_export('resource_loader.get_root_dir_with_all_resources')
def get_root_dir_with_all_resources():
"""Get a root directory containing all the data attributes in the build rule.
@@ -101,6 +105,7 @@ def get_root_dir_with_all_resources():
return data_files_dir or script_dir
@tf_export('resource_loader.get_path_to_datafile')
def get_path_to_datafile(path):
"""Get the path to the specified file in the data dependencies.
@@ -120,6 +125,7 @@ def get_path_to_datafile(path):
return _os.path.join(data_files_path, path)
@tf_export('resource_loader.readahead_file_path')
def readahead_file_path(path, readahead='128M'): # pylint: disable=unused-argument
"""Readahead files not implemented; simply returns given path."""
return path

tensorflow/python/platform/tf_logging.py

@@ -35,6 +35,7 @@ import threading
import six
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export
# Don't use this directly. Use _get_logger() instead.
@@ -90,30 +91,37 @@ def _get_logger():
_logger_lock.release()
@tf_export('logging.log')
def log(level, msg, *args, **kwargs):
_get_logger().log(level, msg, *args, **kwargs)
@tf_export('logging.debug')
def debug(msg, *args, **kwargs):
_get_logger().debug(msg, *args, **kwargs)
@tf_export('logging.error')
def error(msg, *args, **kwargs):
_get_logger().error(msg, *args, **kwargs)
@tf_export('logging.fatal')
def fatal(msg, *args, **kwargs):
_get_logger().fatal(msg, *args, **kwargs)
@tf_export('logging.info')
def info(msg, *args, **kwargs):
_get_logger().info(msg, *args, **kwargs)
@tf_export('logging.warn')
def warn(msg, *args, **kwargs):
_get_logger().warn(msg, *args, **kwargs)
@tf_export('logging.warning')
def warning(msg, *args, **kwargs):
_get_logger().warning(msg, *args, **kwargs)
@@ -136,15 +144,18 @@ _log_prefix = None # later set to google2_log_prefix
_log_counter_per_token = {}
@tf_export('logging.TaskLevelStatusMessage')
def TaskLevelStatusMessage(msg):
error(msg)
@tf_export('logging.flush')
def flush():
raise NotImplementedError()
# Code below is taken from pyglib/logging
@tf_export('logging.vlog')
def vlog(level, msg, *args, **kwargs):
_get_logger().log(level, msg, *args, **kwargs)
@@ -164,6 +175,7 @@ def _GetNextLogCountPerToken(token):
return _log_counter_per_token[token]
@tf_export('logging.log_every_n')
def log_every_n(level, msg, n, *args):
"""Log 'msg % args' at level 'level' once per 'n' times.
@@ -180,6 +192,7 @@ def log_every_n(level, msg, n, *args):
log_if(level, msg, not (count % n), *args)
@tf_export('logging.log_first_n')
def log_first_n(level, msg, n, *args): # pylint: disable=g-bad-name
"""Log 'msg % args' at level 'level' only first 'n' times.
@@ -195,6 +208,7 @@ def log_first_n(level, msg, n, *args): # pylint: disable=g-bad-name
log_if(level, msg, count < n, *args)
@tf_export('logging.log_if')
def log_if(level, msg, condition, *args):
"""Log 'msg % args' at level 'level' only if condition is fulfilled."""
if condition:
@@ -251,11 +265,13 @@ def google2_log_prefix(level, timestamp=None, file_and_line=None):
return s
@tf_export('logging.get_verbosity')
def get_verbosity():
"""Return how much logging output will be produced."""
return _get_logger().getEffectiveLevel()
@tf_export('logging.set_verbosity')
def set_verbosity(v):
"""Sets the threshold for what messages will be logged."""
_get_logger().setLevel(v)
@@ -296,4 +312,10 @@ _allowed_symbols = [
'warning',
]
tf_export('logging.DEBUG').export_constant(__name__, 'DEBUG')
tf_export('logging.ERROR').export_constant(__name__, 'ERROR')
tf_export('logging.FATAL').export_constant(__name__, 'FATAL')
tf_export('logging.INFO').export_constant(__name__, 'INFO')
tf_export('logging.WARN').export_constant(__name__, 'WARN')
remove_undocumented(__name__, _allowed_symbols)

tensorflow/python/profiler/model_analyzer.py

@@ -33,6 +33,7 @@ from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
_DEFAULT_PROFILE_OPTIONS = 0
_DEFAULT_ADVISE_OPTIONS = 0
@@ -121,6 +122,7 @@ def _build_advisor_options(options):
return opts
@tf_export('profiler.Profiler')
class Profiler(object):
"""TensorFlow multi-step profiler.
@@ -304,6 +306,7 @@ class Profiler(object):
print_mdl.WriteProfile(filename)
@tf_export('profiler.profile')
def profile(graph=None,
run_meta=None,
op_log=None,
@@ -378,6 +381,7 @@ def profile(graph=None,
return tfprof_node
@tf_export('profiler.advise')
def advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
"""Auto profile and advise.

tensorflow/python/profiler/option_builder.py

@@ -20,8 +20,10 @@ from __future__ import print_function
import copy
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
@tf_export('profiler.ProfileOptionBuilder')
class ProfileOptionBuilder(object):
# pylint: disable=line-too-long
"""Option Builder for Profiling API.

tensorflow/python/profiler/tfprof_logger.py

@@ -30,6 +30,7 @@ from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.profiler.internal import flops_registry # pylint: disable=unused-import
from tensorflow.python.util.tf_export import tf_export
TRAINABLE_VARIABLES = '_trainable_variables'
REGISTERED_FLOP_STATS = 'flops'
@@ -187,6 +188,7 @@ def merge_default_with_oplog(graph, op_log=None, run_meta=None,
return tmp_op_log
@tf_export('profiler.write_op_log')
def write_op_log(graph, log_dir, op_log=None, run_meta=None, add_trace=True):
"""Log provided 'op_log', and add additional model information below.

tensorflow/python/summary/writer/writer.py

@@ -32,6 +32,7 @@ from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import plugin_asset
from tensorflow.python.summary.writer.event_file_writer import EventFileWriter
from tensorflow.python.util.tf_export import tf_export
_PLUGINS_DIR = "plugins"
@@ -276,6 +277,7 @@ class SummaryToEventTransformer(object):
self.event_writer.add_event(event)
@tf_export("summary.FileWriter")
class FileWriter(SummaryToEventTransformer):
"""Writes `Summary` protocol buffers to event files.

tensorflow/python/summary/writer/writer_cache.py

@@ -22,8 +22,10 @@ import threading
from tensorflow.python.framework import ops
from tensorflow.python.summary.writer.writer import FileWriter
from tensorflow.python.util.tf_export import tf_export
@tf_export('summary.FileWriterCache')
class FileWriterCache(object):
"""Cache for file writers.

tensorflow/python/util/compat.py

@@ -41,8 +41,10 @@ import numpy as _np
import six as _six
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export
@tf_export('compat.as_bytes', 'compat.as_str')
def as_bytes(bytes_or_text, encoding='utf-8'):
"""Converts either bytes or unicode to `bytes`, using utf-8 encoding for text.
@@ -65,6 +67,7 @@ def as_bytes(bytes_or_text, encoding='utf-8'):
(bytes_or_text,))
@tf_export('compat.as_text')
def as_text(bytes_or_text, encoding='utf-8'):
"""Returns the given argument as a unicode string.
@@ -93,6 +96,7 @@ else:
as_str = as_text
@tf_export('compat.as_str_any')
def as_str_any(value):
"""Converts to `str` as `str(value)`, but use `as_str` for `bytes`.
@@ -125,11 +129,16 @@ def path_to_str(path):
# Numpy 1.8 scalars don't inherit from numbers.Integral in Python 3, so we
# need to check them specifically. The same goes from Real and Complex.
integral_types = (_numbers.Integral, _np.integer)
tf_export('compat.integral_types').export_constant(__name__, 'integral_types')
real_types = (_numbers.Real, _np.integer, _np.floating)
tf_export('compat.real_types').export_constant(__name__, 'real_types')
complex_types = (_numbers.Complex, _np.number)
tf_export('compat.complex_types').export_constant(__name__, 'complex_types')
# Either bytes or text.
bytes_or_text_types = (bytes, _six.text_type)
tf_export('compat.bytes_or_text_types').export_constant(__name__,
'bytes_or_text_types')
_allowed_symbols = [
'as_str',

tensorflow/tools/api/generator/BUILD

@@ -78,6 +78,15 @@ genrule(
"api/sets/__init__.py",
"api/summary/__init__.py",
"api/train/queue_runner/__init__.py",
"api/compat/__init__.py",
"api/data/__init__.py",
"api/estimator/__init__.py",
"api/estimator/export/__init__.py",
"api/estimator/inputs/__init__.py",
"api/feature_column/__init__.py",
"api/losses/__init__.py",
"api/profiler/__init__.py",
"api/python_io/__init__.py",
],
cmd = "$(location create_python_api) $(OUTS)",
tools = ["create_python_api"],
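The genrule change above adds one generated __init__.py output per newly exported namespace, so the create_python_api tool emits modules for them. After regeneration, the decorated symbols resolve under their exported names (tf.data.Dataset, tf.losses.get_total_loss, tf.python_io.TFRecordWriter, and so on) without importing the internal modules directly. As a rough, hypothetical sketch, the generated api/data/__init__.py would contain imports along these lines, though the exact output is up to create_python_api:

# Hypothetical excerpt of a generated api/data/__init__.py:
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.data.ops.iterator_ops import Iterator
from tensorflow.python.data.ops.readers import FixedLengthRecordDataset
from tensorflow.python.data.ops.readers import TFRecordDataset
from tensorflow.python.data.ops.readers import TextLineDataset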