Fix a bunch of bad links and missing docs in contrib.

Change: 154820641
Mark Daoust 2017-05-02 04:08:23 -08:00 committed by TensorFlower Gardener
parent 9e289ce040
commit 883e32600e
10 changed files with 132 additions and 242 deletions


@@ -15,74 +15,6 @@
"""Classes representing statistical distributions and ops for working with them.
See the @{$python/contrib.distributions} guide.
## Distribution Object
@@ReparameterizationType
@@Distribution
## Individual Distributions
@@Binomial
@@Bernoulli
@@BernoulliWithSigmoidProbs
@@Beta
@@BetaWithSoftplusConcentration
@@Categorical
@@Chi2
@@Chi2WithAbsDf
@@Deterministic
@@VectorDeterministic
@@Exponential
@@ExponentialWithSoftplusRate
@@Gamma
@@GammaWithSoftplusConcentrationRate
@@Geometric
@@InverseGamma
@@InverseGammaWithSoftplusConcentrationRate
@@Laplace
@@LaplaceWithSoftplusScale
@@Logistic
@@NegativeBinomial
@@Normal
@@NormalWithSoftplusScale
@@Poisson
@@StudentT
@@StudentTWithAbsDfSoftplusScale
@@Uniform
@@MultivariateNormalDiag
@@MultivariateNormalTriL
@@MultivariateNormalDiagPlusLowRank
@@MultivariateNormalDiagWithSoftplusScale
@@Dirichlet
@@DirichletMultinomial
@@Multinomial
@@WishartCholesky
@@WishartFull
@@TransformedDistribution
@@QuantizedDistribution
@@Mixture
@@ExpRelaxedOneHotCategorical
@@OneHotCategorical
@@RelaxedBernoulli
@@RelaxedOneHotCategorical
## Kullback-Leibler Divergence
@@kl_divergence
@@RegisterKL
## Helper Functions
@@matrix_diag_transform
@@normal_conjugates_known_scale_posterior
@@normal_conjugates_known_scale_predictive
@@softplus_inverse
## Functions for statistics of samples
@@percentile
"""
from __future__ import absolute_import
from __future__ import division
@@ -140,6 +72,71 @@ _allowed_symbols = [
'ConditionalTransformedDistribution',
'FULLY_REPARAMETERIZED',
'NOT_REPARAMETERIZED',
'Affine',
'AffineLinearOperator',
'Bijector',
'Chain',
'CholeskyOuterProduct',
'Exp',
'Identity',
'Inline',
'Invert',
'PowerTransform',
'SigmoidCentered',
'SoftmaxCentered',
'Softplus',
'ReparameterizationType',
'Distribution',
'Binomial',
'Bernoulli',
'BernoulliWithSigmoidProbs',
'Beta',
'BetaWithSoftplusConcentration',
'Categorical',
'Chi2',
'Chi2WithAbsDf',
'Deterministic',
'VectorDeterministic',
'Exponential',
'ExponentialWithSoftplusRate',
'Gamma',
'GammaWithSoftplusConcentrationRate',
'Geometric',
'InverseGamma',
'InverseGammaWithSoftplusConcentrationRate',
'Laplace',
'LaplaceWithSoftplusScale',
'Logistic',
'NegativeBinomial',
'Normal',
'NormalWithSoftplusScale',
'Poisson',
'StudentT',
'StudentTWithAbsDfSoftplusScale',
'Uniform',
'MultivariateNormalDiag',
'MultivariateNormalTriL',
'MultivariateNormalDiagPlusLowRank',
'MultivariateNormalDiagWithSoftplusScale',
'Dirichlet',
'DirichletMultinomial',
'Multinomial',
'WishartCholesky',
'WishartFull',
'TransformedDistribution',
'QuantizedDistribution',
'Mixture',
'ExpRelaxedOneHotCategorical',
'OneHotCategorical',
'RelaxedBernoulli',
'RelaxedOneHotCategorical',
'kl_divergence',
'RegisterKL',
'matrix_diag_transform',
'normal_conjugates_known_scale_posterior',
'normal_conjugates_known_scale_predictive',
'softplus_inverse',
'percentile'
]
remove_undocumented(__name__, _allowed_symbols)
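For context, a minimal sketch of how the symbols documented above are typically used, assuming the TF 1.x graph-and-session style and the `loc`/`scale` parameterization shown in the symbol list:

```python
import tensorflow as tf

ds = tf.contrib.distributions

# Two scalar Normal distributions.
p = ds.Normal(loc=0., scale=1.)
q = ds.Normal(loc=1., scale=2.)

samples = p.sample(5)            # draw 5 samples from p
log_probs = p.log_prob(samples)  # log-density of p at those samples
kl = ds.kl_divergence(p, q)      # analytic KL(p || q), via the KL registry

with tf.Session() as sess:
  print(sess.run([samples, log_probs, kl]))
```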


@@ -22,10 +22,26 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.losses.python import losses
# pylint: disable=wildcard-import
from tensorflow.contrib.losses.python.losses import *
# pylint: enable=unused-import,wildcard-import
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, doc_string_modules=[losses])
_allowed_symbols = [
'absolute_difference',
'add_loss',
'hinge_loss',
'compute_weighted_loss',
'cosine_distance',
'get_losses',
'get_regularization_losses',
'get_total_loss',
'log_loss',
'mean_pairwise_squared_error',
'mean_squared_error',
'sigmoid_cross_entropy',
'softmax_cross_entropy',
'sparse_softmax_cross_entropy',
]
remove_undocumented(__name__, _allowed_symbols)
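As a rough illustration of the now-documented surface, a sketch assuming the TF 1.x contrib API; the weight argument is passed positionally because its keyword name (`weight` vs. `weights`) changed between releases:

```python
import tensorflow as tf

predictions = tf.constant([[0.2, 0.8], [0.9, 0.1]])
labels = tf.constant([[0.0, 1.0], [1.0, 0.0]])

# Each call computes a loss tensor and adds it to the GraphKeys.LOSSES
# collection; the third positional argument rescales the loss.
tf.contrib.losses.log_loss(predictions, labels, 2.0)
tf.contrib.losses.mean_squared_error(predictions, labels)

# Sum of everything in the losses collection (plus regularization losses).
total_loss = tf.contrib.losses.get_total_loss()
```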


@@ -12,127 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Loss operations for use in neural networks.
"""Ops for building neural network losses.
Note: By default all the losses are collected into the `GraphKeys.LOSSES`
collection.
All of the loss functions take a pair of predictions and ground truth labels,
from which the loss is computed. It is assumed that the shape of both these
tensors is of the form [batch_size, d1, ... dN] where `batch_size` is the number
of samples in the batch and `d1` ... `dN` are the remaining dimensions.
It is common, when training with multiple loss functions, to adjust the relative
strengths of individual losses. This is performed by rescaling the losses via
a `weight` parameter passed to the loss functions. For example, if we were
training with both log_loss and sum_of_squares_loss, and we wished that the
log_loss penalty be twice as severe as the sum_of_squares_loss, we would
implement this as:
# Explicitly set the weight.
tf.contrib.losses.log_loss(predictions, labels, weight=2.0)
# Uses default weight of 1.0
tf.contrib.losses.sum_of_squares(predictions, labels)
# All the losses are collected into the `GraphKeys.LOSSES` collection.
losses = tf.get_collection(tf.GraphKeys.LOSSES)
While specifying a scalar loss rescales the loss over the entire batch,
we sometimes want to rescale the loss per batch sample. For example, if we have
certain examples that matter more to us to get right, we might want to use a
higher loss for them than for other samples whose mistakes matter less. In this case, we
can provide a weight vector of length `batch_size` which results in the loss
for each sample in the batch being scaled by the corresponding weight element.
For example, consider the case of a classification problem where we want to
maximize our accuracy but are especially interested in obtaining high accuracy
for a specific class:
inputs, labels = LoadData(batch_size=3)
logits = MyModelPredictions(inputs)
# Ensures that the loss for examples whose ground truth class is `3` is 5x
# higher than the loss for all other examples.
weight = tf.multiply(4, tf.cast(tf.equal(labels, 3), tf.float32)) + 1
onehot_labels = tf.one_hot(labels, num_classes=5)
tf.contrib.losses.softmax_cross_entropy(logits, onehot_labels, weight=weight)
Finally, in certain cases, we may want to specify a different loss for every
single measurable value. For example, if we are performing per-pixel depth
prediction, or per-pixel denoising, a single batch sample has P values where P
is the number of pixels in the image. For many losses, the number of measurable
values matches the number of elements in the predictions and labels tensors.
For others, such as softmax_cross_entropy and cosine_distance, the
loss function reduces the dimensions of the inputs to produce a tensor of
losses for each measurable value. For example, softmax_cross_entropy takes as
input predictions and labels of dimension [batch_size, num_classes] but the
number of measurable values is [batch_size]. Consequently, when passing a weight
tensor to specify a different loss for every measurable value, the dimension of
the tensor will depend on the loss being used.
For a concrete example, consider the case of per-pixel depth prediction where
certain ground truth depth values are missing (due to sensor noise in the
capture process). In this case, we want to assign zero weight to losses for
these predictions.
# 'depths' that are missing have a value of 0:
images, depths = LoadData(...)
predictions = MyModelPredictions(images)
weight = tf.cast(tf.greater(depths, 0), tf.float32)
loss = tf.contrib.losses.sum_of_squares(predictions, depths, weight)
Note that when using weights for the losses, the final average is computed
by rescaling the losses by the weights and then dividing by the total number of
non-zero samples. For an arbitrary set of weights, this may not necessarily
produce a weighted average. Instead, it simply and transparently rescales the
per-element losses before averaging over the number of observations. For example,
if the losses computed by the loss function are an array [4, 1, 2, 3] and the
weights are an array [1, 0.5, 3, 9], then the average loss is:
(4*1 + 1*0.5 + 2*3 + 3*9) / 4
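As a quick sanity check of that rule, in plain Python rather than TensorFlow:

```python
losses = [4.0, 1.0, 2.0, 3.0]
weights = [1.0, 0.5, 3.0, 9.0]
# Rescale each loss by its weight, then divide by the number of samples,
# not by the sum of the weights.
average = sum(l * w for l, w in zip(losses, weights)) / len(losses)
print(average)  # 37.5 / 4 = 9.375
```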
However, with a single loss function and an arbitrary set of weights, one can
still easily create a loss function such that the resulting loss is a
weighted average over the individual prediction errors:
images, labels = LoadData(...)
predictions = MyModelPredictions(images)
weight = MyComplicatedWeightingFunction(labels)
weight = tf.div(weight, tf.size(weight))
loss = tf.contrib.losses.sum_of_squares(predictions, labels, weight)
@@absolute_difference
@@add_loss
@@hinge_loss
@@compute_weighted_loss
@@cosine_distance
@@get_losses
@@get_regularization_losses
@@get_total_loss
@@log_loss
@@mean_pairwise_squared_error
@@mean_squared_error
@@sigmoid_cross_entropy
@@softmax_cross_entropy
@@sparse_softmax_cross_entropy
The following are deprecated in favor of `mean_pairwise_squared_error` and
`mean_squared_error`.
@@sum_of_pairwise_squares
@@sum_of_squares
See @{$python/contrib.losses}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.losses.python.losses.loss_ops import *
from tensorflow.python.util.all_util import make_all
# pylint: enable=unused-import,wildcard-import
__all__ = make_all(__name__)
# pylint: enable=wildcard-import


@@ -16,36 +16,6 @@
"""Ops for building neural network seq2seq decoders and losses.
See the @{$python/contrib.seq2seq} guide.
@@Decoder
@@dynamic_decode
@@BasicDecoderOutput
@@BasicDecoder
@@BeamSearchDecoderOutput
@@BeamSearchDecoderState
@@BeamSearchDecoder
@@FinalBeamSearchDecoderOutput
@@Helper
@@CustomHelper
@@GreedyEmbeddingHelper
@@ScheduledEmbeddingTrainingHelper
@@ScheduledOutputTrainingHelper
@@TrainingHelper
@@BahdanauAttention
@@LuongAttention
@@hardmax
@@AttentionWrapperState
@@AttentionWrapper
@@gather_tree
@@tile_batch
"""
from __future__ import absolute_import
@@ -63,6 +33,30 @@ from tensorflow.contrib.seq2seq.python.ops.loss import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,wildcard-import,line-too-long
_allowed_symbols = ["sequence_loss"]
_allowed_symbols = [
"sequence_loss",
"Decoder",
"dynamic_decode",
"BasicDecoder",
"BasicDecoderOutput",
"BeamSearchDecoder",
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"Helper",
"CustomHelper",
"FinalBeamSearchDecoderOutput",
"gather_tree",
"GreedyEmbeddingHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"TrainingHelper",
"BahdanauAttention",
"LuongAttention",
"hardmax",
"AttentionWrapperState",
"AttentionWrapper",
"AttentionMechanism",
"tile_batch"]
remove_undocumented(__name__, _allowed_symbols)
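For orientation, a minimal decoding sketch using the symbols listed above, assuming the TF 1.x contrib APIs; the tensor sizes are illustrative, and the result of `dynamic_decode` is indexed rather than unpacked because the length of the returned tuple differs between releases:

```python
import tensorflow as tf

batch_size, max_time, embed_dim, num_units = 8, 10, 16, 32
inputs = tf.random_normal([batch_size, max_time, embed_dim])
lengths = tf.fill([batch_size], max_time)  # every sequence uses the full length

cell = tf.contrib.rnn.LSTMCell(num_units)
helper = tf.contrib.seq2seq.TrainingHelper(inputs, lengths)
decoder = tf.contrib.seq2seq.BasicDecoder(
    cell, helper, initial_state=cell.zero_state(batch_size, tf.float32))

outputs = tf.contrib.seq2seq.dynamic_decode(decoder)[0]
logits = outputs.rnn_output  # [batch_size, max_time, num_units]
```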


@@ -39,6 +39,7 @@ from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism",
"AttentionWrapper",
"AttentionWrapperState",
"LuongAttention",


@@ -137,16 +137,16 @@ which to operate must always be given explicitly. This is the reason why
## Module: reroute
* @{tf.contrib.graph_editor.reroute.swap_ts}
* @{tf.contrib.graph_editor.reroute.reroute_ts}
* @{tf.contrib.graph_editor.reroute.swap_inputs}
* @{tf.contrib.graph_editor.reroute.reroute_inputs}
* @{tf.contrib.graph_editor.reroute.swap_outputs}
* @{tf.contrib.graph_editor.reroute.reroute_outputs}
* @{tf.contrib.graph_editor.reroute.swap_ios}
* @{tf.contrib.graph_editor.reroute.reroute_ios}
* @{tf.contrib.graph_editor.reroute.remove_control_inputs}
* @{tf.contrib.graph_editor.reroute.add_control_inputs}
* @{tf.contrib.graph_editor.swap_ts}
* @{tf.contrib.graph_editor.reroute_ts}
* @{tf.contrib.graph_editor.swap_inputs}
* @{tf.contrib.graph_editor.reroute_inputs}
* @{tf.contrib.graph_editor.swap_outputs}
* @{tf.contrib.graph_editor.reroute_outputs}
* @{tf.contrib.graph_editor.swap_ios}
* @{tf.contrib.graph_editor.reroute_ios}
* @{tf.contrib.graph_editor.remove_control_inputs}
* @{tf.contrib.graph_editor.add_control_inputs}
## Module: edit


@@ -21,7 +21,7 @@ Subclasses of `LinearOperator` provide access to common methods on a
* @{tf.contrib.linalg.LinearOperatorDiag}
* @{tf.contrib.linalg.LinearOperatorIdentity}
* @{tf.contrib.linalg.LinearOperatorScaledIdentity}
* @{tf.contrib.linalg.LinearOperatorMatrix}
* @{tf.contrib.linalg.LinearOperatorFullMatrix}
* @{tf.contrib.linalg.LinearOperatorTriL}
* @{tf.contrib.linalg.LinearOperatorUDVHUpdate}
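A small sketch of the shared `LinearOperator` interface, assuming the TF 1.x contrib.linalg API; the 2x2 diagonal values are illustrative, and only methods common to the listed subclasses are used:

```python
import tensorflow as tf

operator = tf.contrib.linalg.LinearOperatorDiag([1., 2.])  # represents diag(1, 2)

x = tf.constant([[3.], [4.]])
y = tf.matmul(operator.to_dense(), x)  # [[3.], [8.]]
det = operator.determinant()           # product of the diagonal: 2.0
```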


@@ -13,8 +13,8 @@ of samples in the batch and `d1` ... `dN` are the remaining dimensions.
It is common, when training with multiple loss functions, to adjust the relative
strengths of individual losses. This is performed by rescaling the losses via
a `weight` parameter passed to the loss functions. For example, if we were
training with both log_loss and sum_of_squares_loss, and we wished that the
log_loss penalty be twice as severe as the sum_of_squares_loss, we would
training with both log_loss and mean_squared_error, and we wished that the
log_loss penalty be twice as severe as the mean_squared_error, we would
implement this as:
```python
@@ -22,7 +22,7 @@ implement this as:
tf.contrib.losses.log_loss(predictions, labels, weight=2.0)
# Uses default weight of 1.0
tf.contrib.losses.sum_of_squares(predictions, labels)
tf.contrib.losses.mean_squared_error(predictions, labels)
# All the losses are collected into the `GraphKeys.LOSSES` collection.
losses = tf.get_collection(tf.GraphKeys.LOSSES)
@@ -74,7 +74,7 @@ these predictions.
predictions = MyModelPredictions(images)
weight = tf.cast(tf.greater(depths, 0), tf.float32)
loss = tf.contrib.losses.sum_of_squares(predictions, depths, weight)
loss = tf.contrib.losses.mean_squared_error(predictions, depths, weight)
```
Note that when using weights for the losses, the final average is computed
@@ -100,7 +100,7 @@ weighted average over the individual prediction errors:
weight = MyComplicatedWeightingFunction(labels)
weight = tf.div(weight, tf.size(weight))
loss = tf.contrib.losses.sum_of_squares(predictions, depths, weight)
loss = tf.contrib.losses.mean_squared_error(predictions, labels, weight)
```
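Pulling the weighting discussion together, a self-contained sketch assuming the TF 1.x contrib API; the weights tensor is passed positionally because its keyword name (`weight` vs. `weights`) changed between releases:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.3], [0.1, 2.2, 0.4]])
labels = tf.constant([0, 1])
onehot_labels = tf.one_hot(labels, depth=3)

# Make mistakes on class-0 examples 5x as costly as on the others.
weights = tf.multiply(4.0, tf.cast(tf.equal(labels, 0), tf.float32)) + 1.0
loss = tf.contrib.losses.softmax_cross_entropy(logits, onehot_labels, weights)
```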
@{tf.contrib.losses.absolute_difference}
@@ -118,9 +118,4 @@ weighted average over the individual prediction errors:
@{tf.contrib.losses.softmax_cross_entropy}
@{tf.contrib.losses.sparse_softmax_cross_entropy}
The following are deprecated in favor of `mean_pairwise_squared_error` and
`mean_squared_error`.
@{tf.contrib.losses.sum_of_pairwise_squares}
@{tf.contrib.losses.sum_of_squares}


@@ -278,7 +278,7 @@ Then, the code creates a `DNNClassifier` model using the following arguments:
The `tf.contrib.learn` API uses input functions, which create the TensorFlow
operations that generate data for the model. In this case, the data is small
enough that it can be stored in @{tf.constant TensorFlow constants}. The
enough that it can be stored in @{tf.constant$TensorFlow constants}. The
following code produces the simplest possible input pipeline:
```python


@@ -190,7 +190,6 @@ def _get_default_do_not_descend_map():
'tensor_forest',
'tensorboard',
'testing',
'training',
'tfprof',
],
'contrib.bayesflow': [