diff --git a/tensorflow/python/keras/engine/base_layer.py b/tensorflow/python/keras/engine/base_layer.py
index 4b732c07bec..da6099aa186 100644
--- a/tensorflow/python/keras/engine/base_layer.py
+++ b/tensorflow/python/keras/engine/base_layer.py
@@ -96,9 +96,11 @@ _AUTOCAST_TYPES = (ops.Tensor, sparse_tensor.SparseTensor,
 
 keras_layers_gauge = monitoring.BoolGauge('/tensorflow/api/keras/layers',
                                           'keras layers usage', 'method')
+keras_models_gauge = monitoring.BoolGauge(
+    '/tensorflow/api/keras/models', 'keras model usage', 'method')
 keras_api_gauge = monitoring.BoolGauge('/tensorflow/api/keras',
                                        'keras api usage', 'method')
-keras_model_gauge = monitoring.BoolGauge(
+keras_premade_model_gauge = monitoring.BoolGauge(
     '/tensorflow/api/keras/premade_models', 'premade keras model usage', 'type')
 
 
@@ -304,7 +306,10 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
                dynamic=False,
                **kwargs):
     keras_api_gauge.get_cell('layer').set(True)
-    keras_layers_gauge.get_cell(self.__class__.__name__).set(True)
+    if getattr(self, '_is_model_for_instrumentation', False):
+      keras_models_gauge.get_cell(self.__class__.__name__).set(True)
+    else:
+      keras_layers_gauge.get_cell(self.__class__.__name__).set(True)
     # These properties should be set by the user via keyword arguments.
     # note that 'dtype', 'input_shape' and 'batch_input_shape'
     # are only applicable to input layers: do not pass these keywords
diff --git a/tensorflow/python/keras/engine/data_adapter.py b/tensorflow/python/keras/engine/data_adapter.py
index f4c80feddc8..2cc6f69403e 100644
--- a/tensorflow/python/keras/engine/data_adapter.py
+++ b/tensorflow/python/keras/engine/data_adapter.py
@@ -34,6 +34,7 @@ from tensorflow.python.data.ops import dataset_ops
 from tensorflow.python.distribute import distribution_strategy_context as ds_context
 from tensorflow.python.distribute import input_lib
 from tensorflow.python.eager import context
+from tensorflow.python.eager import monitoring
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import errors
 from tensorflow.python.framework import ops
@@ -52,6 +53,9 @@ from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util import nest
 from tensorflow.python.util.tf_export import keras_export
 
+keras_data_adapter_gauge = monitoring.BoolGauge(
+    "/tensorflow/api/keras/data_adapters", "keras data adapter usage", "method")
+
 try:
   from scipy import sparse as scipy_sparse  # pylint: disable=g-import-not-at-top
 except ImportError:
@@ -961,6 +965,8 @@ def select_data_adapter(x, y):
         "handling inputs. Found multiple adapters {} to handle "
         "input: {}, {}".format(
             adapter_cls, _type_name(x), _type_name(y)))
+  # Instrument the data adapter usage before returning it
+  keras_data_adapter_gauge.get_cell(adapter_cls[0].__name__).set(True)
   return adapter_cls[0]
 
 
diff --git a/tensorflow/python/keras/engine/training.py b/tensorflow/python/keras/engine/training.py
index 1109c0d5ed8..2c228583a1d 100644
--- a/tensorflow/python/keras/engine/training.py
+++ b/tensorflow/python/keras/engine/training.py
@@ -218,6 +218,7 @@ class Model(base_layer.Layer, version_utils.ModelVersionSelector):
 
   @trackable.no_automatic_dependency_tracking
   def __init__(self, *args, **kwargs):
+    self._is_model_for_instrumentation = True
     base_layer.keras_api_gauge.get_cell('model').set(True)
 
     # Special case for Subclassed Functional Model, which we couldn't detect
diff --git a/tensorflow/python/keras/layers/core.py b/tensorflow/python/keras/layers/core.py
index c0932e7297b..ad3a881a794 100644
--- a/tensorflow/python/keras/layers/core.py
+++ b/tensorflow/python/keras/layers/core.py
@@ -30,6 +30,7 @@ import numpy as np
 
 from tensorflow.python.eager import backprop
 from tensorflow.python.eager import context
+from tensorflow.python.eager import monitoring
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
@@ -62,6 +63,13 @@ from tensorflow.python.util.tf_export import get_canonical_name_for_symbol
 from tensorflow.python.util.tf_export import get_symbol_from_name
 from tensorflow.python.util.tf_export import keras_export
 
+# TODO(b/168039935): track dropout rate to decide whether/how to make a
+# dropout rate fastpath.
+keras_temporary_dropout_rate = monitoring.BoolGauge(
+    '/tensorflow/api/keras/dropout/temp_rate_is_zero',
+    'Temporarily record if Keras dropout layer was created w/'
+    'constant rate = 0')
+
 # pylint: disable=g-classes-have-attributes
 
 @keras_export('keras.layers.Masking')
@@ -186,6 +194,10 @@ class Dropout(Layer):
   def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
     super(Dropout, self).__init__(**kwargs)
     self.rate = rate
+    if isinstance(rate, (int, float)) and not rate:
+      keras_temporary_dropout_rate.get_cell().set(True)
+    else:
+      keras_temporary_dropout_rate.get_cell().set(False)
     self.noise_shape = noise_shape
     self.seed = seed
     self.supports_masking = True
diff --git a/tensorflow/python/keras/optimizer_v2/optimizer_v2.py b/tensorflow/python/keras/optimizer_v2/optimizer_v2.py
index 0da75edf0c5..ca3f1a3a9b1 100644
--- a/tensorflow/python/keras/optimizer_v2/optimizer_v2.py
+++ b/tensorflow/python/keras/optimizer_v2/optimizer_v2.py
@@ -30,6 +30,7 @@ from tensorflow.python.distribute import parameter_server_strategy
 from tensorflow.python.distribute import values as ds_values
 from tensorflow.python.eager import backprop
 from tensorflow.python.eager import context
+from tensorflow.python.eager import monitoring
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import tensor_util
@@ -54,6 +55,9 @@ from tensorflow.python.util import nest
 from tensorflow.python.util.tf_export import keras_export
 
 
+keras_optimizers_gauge = monitoring.BoolGauge(
+    "/tensorflow/api/keras/optimizers", "keras optimizer usage", "method")
+
 _DEFAULT_VALID_DTYPES = frozenset([
     dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64,
     dtypes.complex64, dtypes.complex128
@@ -326,6 +330,9 @@ class OptimizerV2(trackable.Trackable):
     Raises:
       ValueError: in case of any invalid argument.
     """
+    # Instrument optimizer usages
+    keras_optimizers_gauge.get_cell(self.__class__.__name__).set(True)
+
     allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay", "global_clipnorm"}
     for k in kwargs:
       if k not in allowed_kwargs:
diff --git a/tensorflow/python/keras/premade/linear.py b/tensorflow/python/keras/premade/linear.py
index 438e3270021..f8ea38fa5f6 100644
--- a/tensorflow/python/keras/premade/linear.py
+++ b/tensorflow/python/keras/premade/linear.py
@@ -95,7 +95,7 @@ class LinearModel(training.Model):
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     super(LinearModel, self).__init__(**kwargs)
-    base_layer.keras_model_gauge.get_cell('Linear').set(True)
+    base_layer.keras_premade_model_gauge.get_cell('Linear').set(True)
 
   def build(self, input_shape):
     if isinstance(input_shape, dict):
diff --git a/tensorflow/python/keras/premade/wide_deep.py b/tensorflow/python/keras/premade/wide_deep.py
index edb0124276f..1f70a38cc93 100644
--- a/tensorflow/python/keras/premade/wide_deep.py
+++ b/tensorflow/python/keras/premade/wide_deep.py
@@ -85,7 +85,7 @@ class WideDeepModel(keras_training.Model):
       Allowed keyword arguments include `name`.
     """
     super(WideDeepModel, self).__init__(**kwargs)
-    base_layer.keras_model_gauge.get_cell('WideDeep').set(True)
+    base_layer.keras_premade_model_gauge.get_cell('WideDeep').set(True)
     self.linear_model = linear_model
     self.dnn_model = dnn_model
     self.activation = activations.get(activation)
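For reference, every instrumentation point in this change goes through `tensorflow.python.eager.monitoring.BoolGauge`. Below is a minimal sketch of how such a gauge behaves; the metric name `/tensorflow/api/keras/example_usage` and the label value `Dense` are made up purely for illustration and do not correspond to gauges added by this patch (gauge names must be unique per process, so the real code defines each one once at module level, as above).

  from tensorflow.python.eager import monitoring

  # A BoolGauge is declared with a metric name, a description, and label
  # names; each distinct label value gets its own cell.
  example_gauge = monitoring.BoolGauge(
      '/tensorflow/api/keras/example_usage', 'example keras usage', 'method')

  # Mirrors what Layer.__init__ / OptimizerV2.__init__ do in this patch,
  # using the class name as the label value.
  example_gauge.get_cell('Dense').set(True)

  # A cell can be read back in-process, which is how recorded usage surfaces.
  assert example_gauge.get_cell('Dense').value()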