From 186d9b18da83758e3e3e2d0270a96276fb0d792c Mon Sep 17 00:00:00 2001 From: Thomas O'Malley Date: Fri, 15 Jan 2021 16:06:05 -0800 Subject: [PATCH] PreprocessingLayers adapt API changes. Changed methods: - adapt(data, batch_size=None, steps=None, reset_state) Added methods: - update_state - merge_state - finalize_state - compile - make_adapt_function Reimplements adapt on top of existing Model.fit utilities. In follow-up changes, each subclass will be migrated to use the new API methods directly, and the CombinerPreprocessingLayer class will be removed. PiperOrigin-RevId: 352102154 Change-Id: If993c07e68b6652896010a25a21dad0560e93329 --- .../keras/engine/base_preprocessing_layer.py | 375 +++++++++++++----- .../engine/base_preprocessing_layer_test.py | 31 +- .../engine/base_preprocessing_layer_v1.py | 102 +++++ .../python/keras/engine/data_adapter.py | 29 +- .../preprocessing/category_encoding_test.py | 6 +- .../preprocessing/normalization_test.py | 40 -- .../preprocessing/preprocessing_stage.py | 10 +- .../preprocessing/preprocessing_stage_test.py | 17 - ...tal.preprocessing.-category-crossing.pbtxt | 34 +- ...tal.preprocessing.-category-encoding.pbtxt | 32 ++ ...erimental.preprocessing.-center-crop.pbtxt | 34 +- ...mental.preprocessing.-discretization.pbtxt | 34 +- ....experimental.preprocessing.-hashing.pbtxt | 34 +- ...mental.preprocessing.-integer-lookup.pbtxt | 32 ++ ...imental.preprocessing.-normalization.pbtxt | 32 ++ ...l.preprocessing.-preprocessing-layer.pbtxt | 36 +- ...ental.preprocessing.-random-contrast.pbtxt | 34 +- ...erimental.preprocessing.-random-crop.pbtxt | 34 +- ...erimental.preprocessing.-random-flip.pbtxt | 34 +- ...imental.preprocessing.-random-height.pbtxt | 34 +- ...ental.preprocessing.-random-rotation.pbtxt | 34 +- ...al.preprocessing.-random-translation.pbtxt | 34 +- ...rimental.preprocessing.-random-width.pbtxt | 34 +- ...erimental.preprocessing.-random-zoom.pbtxt | 34 +- ...xperimental.preprocessing.-rescaling.pbtxt | 34 +- ...experimental.preprocessing.-resizing.pbtxt | 34 +- ...imental.preprocessing.-string-lookup.pbtxt | 32 ++ ...al.preprocessing.-text-vectorization.pbtxt | 32 ++ ...tal.preprocessing.-category-crossing.pbtxt | 34 +- ...tal.preprocessing.-category-encoding.pbtxt | 32 ++ ...erimental.preprocessing.-center-crop.pbtxt | 34 +- ...mental.preprocessing.-discretization.pbtxt | 34 +- ....experimental.preprocessing.-hashing.pbtxt | 34 +- ...mental.preprocessing.-integer-lookup.pbtxt | 32 ++ ...imental.preprocessing.-normalization.pbtxt | 34 +- ...l.preprocessing.-preprocessing-layer.pbtxt | 36 +- ...ental.preprocessing.-random-contrast.pbtxt | 34 +- ...erimental.preprocessing.-random-crop.pbtxt | 34 +- ...erimental.preprocessing.-random-flip.pbtxt | 34 +- ...imental.preprocessing.-random-height.pbtxt | 34 +- ...ental.preprocessing.-random-rotation.pbtxt | 34 +- ...al.preprocessing.-random-translation.pbtxt | 34 +- ...rimental.preprocessing.-random-width.pbtxt | 34 +- ...erimental.preprocessing.-random-zoom.pbtxt | 34 +- ...xperimental.preprocessing.-rescaling.pbtxt | 34 +- ...experimental.preprocessing.-resizing.pbtxt | 34 +- ...imental.preprocessing.-string-lookup.pbtxt | 32 ++ ...al.preprocessing.-text-vectorization.pbtxt | 32 ++ 48 files changed, 1737 insertions(+), 219 deletions(-) diff --git a/tensorflow/python/keras/engine/base_preprocessing_layer.py b/tensorflow/python/keras/engine/base_preprocessing_layer.py index 5b3927c1351..408a6902913 100644 --- a/tensorflow/python/keras/engine/base_preprocessing_layer.py +++ 
b/tensorflow/python/keras/engine/base_preprocessing_layer.py
@@ -23,20 +23,21 @@ import collections
 import numpy as np
 import six
 
-from tensorflow.python.data.ops import dataset_ops
 from tensorflow.python.eager import context
+from tensorflow.python.eager import def_function
 from tensorflow.python.eager import monitoring
 from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import errors
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import sparse_tensor
-from tensorflow.python.framework import type_spec
 from tensorflow.python.keras import backend as K
-from tensorflow.python.keras.engine import training_generator_v1
+from tensorflow.python.keras.engine import data_adapter
 from tensorflow.python.keras.engine.base_layer import Layer
 from tensorflow.python.keras.utils import tf_utils
+from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import sparse_ops
+from tensorflow.python.ops import variables
 from tensorflow.python.ops.ragged import ragged_tensor
+from tensorflow.python.training.tracking import base as trackable
 from tensorflow.python.util.tf_export import keras_export
 
 
@@ -48,25 +49,227 @@ keras_kpl_gauge = monitoring.BoolGauge(
 @keras_export('keras.layers.experimental.preprocessing.PreprocessingLayer')
 @six.add_metaclass(abc.ABCMeta)
 class PreprocessingLayer(Layer):
-  """Base class for PreprocessingLayers."""
+  """Base class for PreprocessingLayers.
+
+  Attributes:
+    stateful: Whether the layer contains state that needs to be adapted via
+      `PreprocessingLayer.adapt`.
+    streaming: Whether a layer can be adapted multiple times without resetting
+      the state of the layer.
+  """
   _must_restore_from_config = True
 
-  def adapt(self, data, reset_state=True):
-    # TODO(momernick): Add examples.
+  def __init__(self, stateful=False, streaming=True, **kwargs):
+    super(PreprocessingLayer, self).__init__(**kwargs)
+    self._stateful = stateful
+    self._streaming = streaming
+    self._is_compiled = False
+    self._is_adapted = False
+
+    # Sets `is_adapted=False` when `reset_state` is called.
+    self._reset_state_impl = self.reset_state
+    self.reset_state = self._reset_state_wrapper
+
+    self._adapt_function = None
+
+  @property
+  def streaming(self):
+    """Whether `adapt` can be called twice without resetting the state."""
+    return self._streaming
+
+  @property
+  def is_adapted(self):
+    """Whether the layer has been fit to data already."""
+    return self._is_adapted
+
+  def update_state(self, data):
+    """Accumulates statistics for the preprocessing layer.
+
+    Arguments:
+      data: A mini-batch of inputs to the layer.
+    """
+    if self.stateful:
+      raise NotImplementedError
+
+  def reset_state(self):
+    """Resets the statistics of the preprocessing layer."""
+    if self.stateful:
+      raise NotImplementedError
+
+  def merge_state(self, layers):
+    """Merges the statistics of multiple preprocessing layers.
+
+    This layer will contain the merged state.
+
+    Arguments:
+      layers: Layers whose statistics should be merged with the statistics of
+        this layer.
+    """
+    if self.stateful:
+      raise NotImplementedError
+
+  def finalize_state(self):
+    """Finalizes the statistics for the preprocessing layer.
+
+    This method is called at the end of `adapt`. It handles any one-time
+    operations that should occur after all data has been seen.
+    """
+    pass
+
+  def make_adapt_function(self):
+    """Creates a function to execute one step of `adapt`.
+
+    This method can be overridden to support custom adapt logic.
+    This method is called by `PreprocessingLayer.adapt`.
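+
+    As an illustrative sketch only (the default implementation below also
+    honors `steps_per_execution` and caches the created function), an
+    override might look like:
+
+    ```python
+    def make_adapt_function(self):
+      @tf.function
+      def adapt_step(iterator):
+        data = next(iterator)          # pull one batch from the iterator
+        self._adapt_maybe_build(data)  # build the layer on first use
+        self.update_state(data)        # accumulate statistics for the batch
+      return adapt_step
+    ```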
+
+    Typically, this method directly controls `tf.function` settings,
+    and delegates the actual state update logic to
+    `PreprocessingLayer.update_state`.
+
+    This function is cached the first time `PreprocessingLayer.adapt`
+    is called. The cache is cleared whenever `PreprocessingLayer.compile`
+    is called.
+
+    Returns:
+      Function. The function created by this method should accept a
+      `tf.data.Iterator`, retrieve a batch, and update the state of the
+      layer.
+    """
+    if self._adapt_function is not None:
+      return self._adapt_function
+
+    def adapt_step(iterator):
+      data = next(iterator)
+      self._adapt_maybe_build(data)
+      self.update_state(data)
+
+    if self._steps_per_execution.numpy().item() == 1:
+      adapt_fn = adapt_step
+    else:
+
+      def adapt_fn(iterator):
+        for _ in math_ops.range(self._steps_per_execution):
+          adapt_step(iterator)
+
+    if not self._run_eagerly:
+      adapt_fn = def_function.function(adapt_fn)
+
+    self._adapt_function = adapt_fn
+    return self._adapt_function
+
+  def compile(self, run_eagerly=None, steps_per_execution=None):
+    """Configures the layer for `adapt`.
+
+    Arguments:
+      run_eagerly: Bool. Defaults to `False`. If `True`, this layer's `adapt`
+        logic will not be wrapped in a `tf.function`. Recommended to leave
+        this as `None` unless the layer cannot be run inside a `tf.function`.
+      steps_per_execution: Int. Defaults to 1. The number of batches to run
+        during each `tf.function` call. Running multiple batches inside a
+        single `tf.function` call can greatly improve performance on TPUs or
+        small models with a large Python overhead.
+    """
+    if steps_per_execution is None:
+      steps_per_execution = 1
+    self._configure_steps_per_execution(steps_per_execution)
+
+    if run_eagerly is None:
+      run_eagerly = self.dynamic
+    self._run_eagerly = run_eagerly
+
+    self._is_compiled = True
+
+  def adapt(self, data, batch_size=None, steps=None, reset_state=True):
     """Fits the state of the preprocessing layer to the data being passed.
 
-    Args:
+    Arguments:
       data: The data to train on. It can be passed either as a tf.data
         Dataset, or as a numpy array.
+      batch_size: Integer or `None`.
+        Number of samples per state update.
+        If unspecified, `batch_size` will default to 32.
+        Do not specify the `batch_size` if your data is in the
+        form of datasets, generators, or `keras.utils.Sequence` instances
+        (since they generate batches).
+      steps: Integer or `None`.
+        Total number of steps (batches of samples).
+        When training with input tensors such as
+        TensorFlow data tensors, the default `None` is equal to
+        the number of samples in your dataset divided by
+        the batch size, or 1 if that cannot be determined. If `data` is a
+        `tf.data` dataset and `steps` is `None`, the epoch will run until
+        the input dataset is exhausted. When passing an infinitely
+        repeating dataset, you must specify the `steps` argument. This
+        argument is not supported with array inputs.
       reset_state: Optional argument specifying whether to clear the state of
         the layer at the start of the call to `adapt`, or whether to start
         from the existing state. This argument may not be relevant to all
         preprocessing layers: a subclass of PreprocessingLayer may choose to
-        throw if 'reset_state' is set to False.
+        throw if `reset_state` is set to `False`.
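+
+    For illustration only (using `Normalization`, one adaptable layer in this
+    package; any stateful preprocessing layer is used the same way), a typical
+    call with this signature might be:
+
+    ```python
+    import numpy as np
+
+    layer = tf.keras.layers.experimental.preprocessing.Normalization()
+    # Fit the layer's mean and variance to the data, two samples at a time.
+    layer.adapt(np.array([[0.], [2.], [4.]]), batch_size=2)
+    outputs = layer(np.array([[2.]]))  # normalized with the adapted statistics
+    ```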
""" - pass + _disallow_inside_tf_function('adapt') + if not self.stateful: + return + if not self.streaming and self._is_adapted and not reset_state: + raise ValueError('{} does not supporting calling `adapt` twice without ' + 'resetting the state.'.format(self.__class__.__name__)) + if not self._is_compiled: + self.compile() # Compile with defaults. + if self.built and reset_state: + self.reset_state() + data_handler = data_adapter.DataHandler( + data, + batch_size=batch_size, + steps_per_epoch=steps, + epochs=1, + steps_per_execution=self._steps_per_execution, + distribute=False) + self._adapt_function = self.make_adapt_function() + for _, iterator in data_handler.enumerate_epochs(): + with data_handler.catch_stop_iteration(): + for _ in data_handler.steps(): + self._adapt_function(iterator) + if data_handler.should_sync: + context.async_wait() + self.finalize_state() + self._is_adapted = True + + def _reset_state_wrapper(self): + """Calls `reset_state` and sets `adapted` to `False`.""" + self._reset_state_impl() + self._is_adapted = False + + @trackable.no_automatic_dependency_tracking + def _configure_steps_per_execution(self, steps_per_execution): + self._steps_per_execution = variables.Variable( + steps_per_execution, + dtype='int64', + aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA) + + # TODO(omalleyt): Unify this logic with `Layer._maybe_build`. + def _adapt_maybe_build(self, data): + if not self.built: + try: + # If this is a Numpy array or tensor, we can get shape from .shape. + # If not, an attribute error will be thrown. + data_shape = data.shape + data_shape_nones = tuple([None] * len(data.shape)) + except AttributeError: + # The input has an unknown number of dimensions. + data_shape = None + data_shape_nones = None + + # TODO (b/159261555): move this to base layer build. + batch_input_shape = getattr(self, '_batch_input_shape', None) + if batch_input_shape is None: + # Set the number of dimensions. + self._batch_input_shape = data_shape_nones + self.build(data_shape) + self.built = True +# TODO(omalleyt): This class will be gradually replaced. class CombinerPreprocessingLayer(PreprocessingLayer): """Base class for PreprocessingLayers that do computation using a Combiner. @@ -80,10 +283,41 @@ class CombinerPreprocessingLayer(PreprocessingLayer): """ def __init__(self, combiner, **kwargs): - super(CombinerPreprocessingLayer, self).__init__(**kwargs) - self._combiner = combiner - self._previously_updated = False + super(CombinerPreprocessingLayer, self).__init__(stateful=True, **kwargs) self.state_variables = collections.OrderedDict() + self._combiner = combiner + self._adapt_accumulator = None + + def reset_state(self): + self._adapt_accumulator = None + + def update_state(self, data): + if self._adapt_accumulator is None: + self._adapt_accumulator = self._get_accumulator() + self._adapt_accumulator = self._combiner.compute(data, + self._adapt_accumulator) + + def merge_state(self, layers): + accumulators = ([self._get_accumulator()] + + [l._get_accumulator() for l in layers]) # pylint: disable=protected-access + merged_accumulator = self._combiner.merge(accumulators) + self._set_accumulator(merged_accumulator) + + def finalize_state(self): + self._set_accumulator(self._adapt_accumulator) + + def compile(self, run_eagerly=None, steps_per_execution=None): + # TODO(omalleyt): Remove this once sublayers are switched to new APIs. 
+ if run_eagerly is None: + run_eagerly = True + super(CombinerPreprocessingLayer, self).compile( + run_eagerly=run_eagerly, steps_per_execution=steps_per_execution) + + def adapt(self, data, batch_size=None, steps=None, reset_state=True): + if not reset_state: + self._adapt_accumulator = self._combiner.restore(self._restore_updates()) + super(CombinerPreprocessingLayer, self).adapt( + data, batch_size=batch_size, steps=steps, reset_state=reset_state) def _add_state_variable(self, name, @@ -130,103 +364,16 @@ class CombinerPreprocessingLayer(PreprocessingLayer): data_dict[name] = var.numpy() return data_dict - def _get_dataset_iterator(self, dataset): - """Gets an iterator from a tf.data.Dataset.""" - return dataset_ops.make_one_shot_iterator(dataset).get_next - - def adapt(self, data, reset_state=True): - """Fits the state of the preprocessing layer to the data being passed. - - Args: - data: The data to train on. It can be passed either as a tf.data Dataset, - or as a numpy array. - reset_state: Optional argument specifying whether to clear the state of - the layer at the start of the call to `adapt`, or whether to start from - the existing state. Subclasses may choose to throw if reset_state is set - to 'False'. - """ - if reset_state: - accumulator = None + def _get_accumulator(self): + if self._is_adapted: + return self._combiner.restore(self._restore_updates()) else: - accumulator = self._combiner.restore(self._restore_updates()) - if isinstance(data, (list, tuple)): - data = ops.convert_to_tensor_v2_with_dispatch(data) - if not isinstance(data, - (dataset_ops.DatasetV2, - np.ndarray, - ops.Tensor, - ragged_tensor.RaggedTensor)): - raise ValueError( - '`adapt()` requires a batched Dataset, a Tensor, ' - 'or a Numpy array as input, ' - 'got {}'.format(type(data))) - - if isinstance(data, dataset_ops.DatasetV2): - # Validate that the dataset only contains single-tensor elements. - if not isinstance(data.element_spec, type_spec.TypeSpec): - raise TypeError( - 'The dataset should yield single-Tensor elements. Use `dataset.map`' - 'to select the element of interest.\n' - 'Got dataset.element_spec=' + str(data.element_spec)) - # Validate the datasets to try and ensure we haven't been passed one with - # infinite size. That would cause an infinite loop here. - if tf_utils.dataset_is_infinite(data): - raise ValueError( - 'The dataset passed to `adapt()` has an infinite number of ' - 'elements. Please use `dataset.take(...)` to make the number ' - 'of elements finite.') - next_data = self._get_dataset_iterator(data) - # TODO(fchollet): consider checking if the dataset is already batched - # and otherwise batching it. - elif isinstance(data, (ops.Tensor, ragged_tensor.RaggedTensor)): - next_data = self._get_dataset_iterator( - dataset_ops.Dataset.from_tensor_slices(data).batch(512)) - else: - generator, _ = training_generator_v1.convert_to_generator_like( - data, batch_size=512) - # If the data is not a dataset, we can iterate over it using next(foo); - # here, we wrap that into a callable. - next_data = lambda: next(generator) - - # TODO(momernick): Some sort of status bar? - # TODO(momernick): Implement parallel processing here? - try: - data_element = next_data() - - # First, see if the layer is built or not. If it is not, then we must - # build it. - if not self.built: - try: - # If this is a Numpy array or tensor, we can get shape from .shape. - # If not, an attribute error will be thrown. 
- data_shape = data_element.shape - data_shape_nones = tuple([None]*len(data_element.shape)) - except AttributeError: - # The input has an unknown number of dimensions. - data_shape = None - data_shape_nones = None - - # TODO (b/159261555): move this to base layer build. - batch_input_shape = getattr(self, '_batch_input_shape', None) - if batch_input_shape is None: - # Set the number of dimensions. - self._batch_input_shape = data_shape_nones - - self.build(data_shape) - - # Once we have built the Layer, we can process the input data. We do so - # until we've gotten an exception indicating that we have no more data. - while True: - accumulator = self._combiner.compute(data_element, accumulator) - data_element = next_data() - # Note that this belongs to the outer indentation of 'try' - we need to - # catch exceptions resulting from the first 'next_data()' invocation as - # well. - except (StopIteration, errors.OutOfRangeError): - pass + return None + def _set_accumulator(self, accumulator): updates = self._combiner.extract(accumulator) self._set_state_variables(updates) + self._adapt_accumulator = None # Reset accumulator from adapt. def _set_state_variables(self, updates): """Directly update the internal state of this Layer. @@ -287,6 +434,7 @@ def convert_to_list(values, sparse_default_value=None): return values +# TODO(omalleyt): This class will be gradually replaced. class Combiner(object): """Functional object that defines a shardable computation. @@ -410,3 +558,18 @@ class Combiner(object): The accumulator represented by the passed byte_string. """ pass + + +def _disallow_inside_tf_function(method_name): + """Disallow calling a method inside a `tf.function`.""" + if ops.inside_function(): + error_msg = ( + 'Detected a call to `PreprocessingLayer.{method_name}` inside a ' + '`tf.function`. `PreprocessingLayer.{method_name} is a high-level ' + 'endpoint that manages its own `tf.function`. Please move the call ' + 'to `PreprocessingLayer.{method_name}` outside of all enclosing ' + '`tf.function`s. Note that you can call a `PreprocessingLayer` ' + 'directly on `Tensor`s inside a `tf.function` like: `layer(x)`, ' + 'or update its state like: `layer.update_state(x)`.').format( + method_name=method_name) + raise RuntimeError(error_msg) diff --git a/tensorflow/python/keras/engine/base_preprocessing_layer_test.py b/tensorflow/python/keras/engine/base_preprocessing_layer_test.py index bcb47a26fee..aad7bee808f 100644 --- a/tensorflow/python/keras/engine/base_preprocessing_layer_test.py +++ b/tensorflow/python/keras/engine/base_preprocessing_layer_test.py @@ -60,6 +60,9 @@ class AddingPreprocessingLayer( dtype=dtypes.float32, initializer=init_ops.zeros_initializer) + def reset_state(self): + self._sum.assign([0.]) + def set_total(self, sum_value): """This is an example of how a subclass would implement a direct setter. 
@@ -137,8 +140,12 @@ class PreprocessingLayerTest(keras_parameterized.TestCase): input_dataset = {"foo": 0} layer = get_layer() - with self.assertRaisesRegex(ValueError, "requires a"): - layer.adapt(input_dataset) + if context.executing_eagerly(): + with self.assertRaisesRegex(ValueError, "Failed to find data adapter"): + layer.adapt(input_dataset) + else: + with self.assertRaisesRegex(ValueError, "requires a"): + layer.adapt(input_dataset) def test_adapt_infinite_dataset_fails(self): """Test that preproc layers fail if an infinite dataset is passed.""" @@ -146,8 +153,13 @@ class PreprocessingLayerTest(keras_parameterized.TestCase): np.array([[1], [2], [3], [4], [5], [0]])).repeat() layer = get_layer() - with self.assertRaisesRegex(ValueError, ".*infinite number of elements.*"): - layer.adapt(input_dataset) + if context.executing_eagerly(): + with self.assertRaisesRegex(ValueError, "infinite dataset"): + layer.adapt(input_dataset) + else: + with self.assertRaisesRegex(ValueError, + ".*infinite number of elements.*"): + layer.adapt(input_dataset) def test_pre_build_injected_update_with_no_build_fails(self): """Test external update injection before build() is called fails.""" @@ -248,17 +260,6 @@ class PreprocessingLayerTest(keras_parameterized.TestCase): self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.])) - def test_adapt_dataset_of_tuples_fails(self): - """Test that preproc layers can adapt() before build() is called.""" - input_dataset = dataset_ops.Dataset.from_tensor_slices(( - np.array([[1], [2], [3], [4], [5], [0]]), - np.array([[1], [2], [3], [4], [5], [0]]))) - - layer = get_layer() - - with self.assertRaisesRegex(TypeError, "single-Tensor elements"): - layer.adapt(input_dataset) - def test_post_build_adapt_update_dataset(self): """Test that preproc layers can adapt() after build() is called.""" input_dataset = dataset_ops.Dataset.from_tensor_slices( diff --git a/tensorflow/python/keras/engine/base_preprocessing_layer_v1.py b/tensorflow/python/keras/engine/base_preprocessing_layer_v1.py index f603fac25c3..50bb834bcc1 100644 --- a/tensorflow/python/keras/engine/base_preprocessing_layer_v1.py +++ b/tensorflow/python/keras/engine/base_preprocessing_layer_v1.py @@ -17,10 +17,18 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +import numpy as np + from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops +from tensorflow.python.framework import type_spec from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import base_preprocessing_layer +from tensorflow.python.keras.engine import training_generator_v1 +from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import state_ops +from tensorflow.python.ops.ragged import ragged_tensor class CombinerPreprocessingLayer( @@ -46,6 +54,10 @@ class CombinerPreprocessingLayer( from tf.keras.[...].CombinerPreprocessingLayer will get the right symbol. 
""" + def __init__(self, combiner, **kwargs): + super(CombinerPreprocessingLayer, self).__init__(combiner, **kwargs) + self._previously_updated = False + def _restore_updates(self): """Recreates a dict of updates from the layer's weights.""" data_dict = {} @@ -72,3 +84,93 @@ class CombinerPreprocessingLayer( assignments.append( state_ops.assign(self.state_variables[var_name], value)) K.get_session().run(assignments) + + def adapt(self, data, reset_state=True): + """Fits the state of the preprocessing layer to the data being passed. + + Args: + data: The data to train on. It can be passed either as a tf.data Dataset, + or as a numpy array. + reset_state: Optional argument specifying whether to clear the state of + the layer at the start of the call to `adapt`, or whether to start from + the existing state. Subclasses may choose to throw if reset_state is set + to 'False'. + """ + if reset_state: + accumulator = None + else: + accumulator = self._combiner.restore(self._restore_updates()) + if isinstance(data, (list, tuple)): + data = ops.convert_to_tensor_v2_with_dispatch(data) + if not isinstance(data, (dataset_ops.DatasetV2, np.ndarray, ops.Tensor, + ragged_tensor.RaggedTensor)): + raise ValueError('`adapt()` requires a batched Dataset, a Tensor, ' + 'or a Numpy array as input, ' + 'got {}'.format(type(data))) + + if isinstance(data, dataset_ops.DatasetV2): + # Validate that the dataset only contains single-tensor elements. + if not isinstance(data.element_spec, type_spec.TypeSpec): + raise TypeError( + 'The dataset should yield single-Tensor elements. Use `dataset.map`' + 'to select the element of interest.\n' + 'Got dataset.element_spec=' + str(data.element_spec)) + # Validate the datasets to try and ensure we haven't been passed one with + # infinite size. That would cause an infinite loop here. + if tf_utils.dataset_is_infinite(data): + raise ValueError( + 'The dataset passed to `adapt()` has an infinite number of ' + 'elements. Please use `dataset.take(...)` to make the number ' + 'of elements finite.') + next_data = self._get_dataset_iterator(data) + # TODO(fchollet): consider checking if the dataset is already batched + # and otherwise batching it. + elif isinstance(data, (ops.Tensor, ragged_tensor.RaggedTensor)): + next_data = self._get_dataset_iterator( + dataset_ops.Dataset.from_tensor_slices(data).batch(512)) + else: + generator, _ = training_generator_v1.convert_to_generator_like( + data, batch_size=512) + # If the data is not a dataset, we can iterate over it using next(foo); + # here, we wrap that into a callable. + next_data = lambda: next(generator) + + # TODO(momernick): Some sort of status bar? + # TODO(momernick): Implement parallel processing here? + try: + data_element = next_data() + + # First, see if the layer is built or not. If it is not, then we must + # build it. + if not self.built: + try: + # If this is a Numpy array or tensor, we can get shape from .shape. + # If not, an attribute error will be thrown. + data_shape = data_element.shape + data_shape_nones = tuple([None] * len(data_element.shape)) + except AttributeError: + # The input has an unknown number of dimensions. + data_shape = None + data_shape_nones = None + + # TODO (b/159261555): move this to base layer build. + batch_input_shape = getattr(self, '_batch_input_shape', None) + if batch_input_shape is None: + # Set the number of dimensions. + self._batch_input_shape = data_shape_nones + + self.build(data_shape) + + # Once we have built the Layer, we can process the input data. 
We do so
+      # until we've gotten an exception indicating that we have no more data.
+      while True:
+        accumulator = self._combiner.compute(data_element, accumulator)
+        data_element = next_data()
+    # Note that this belongs to the outer indentation of 'try' - we need to
+    # catch exceptions resulting from the first 'next_data()' invocation as
+    # well.
+    except (StopIteration, errors.OutOfRangeError):
+      pass
+
+    updates = self._combiner.extract(accumulator)
+    self._set_state_variables(updates)
diff --git a/tensorflow/python/keras/engine/data_adapter.py b/tensorflow/python/keras/engine/data_adapter.py
index 3e62f53c5cd..2eccee8959f 100644
--- a/tensorflow/python/keras/engine/data_adapter.py
+++ b/tensorflow/python/keras/engine/data_adapter.py
@@ -1079,7 +1079,30 @@ class DataHandler(object):
                workers=1,
                use_multiprocessing=False,
                model=None,
-               steps_per_execution=None):
+               steps_per_execution=None,
+               distribute=True):
+    """Initializes a `DataHandler`.
+
+    Arguments:
+      x: See `Model.fit`.
+      y: See `Model.fit`.
+      sample_weight: See `Model.fit`.
+      batch_size: See `Model.fit`.
+      steps_per_epoch: See `Model.fit`.
+      initial_epoch: See `Model.fit`.
+      epochs: See `Model.fit`.
+      shuffle: See `Model.fit`.
+      class_weight: See `Model.fit`.
+      max_queue_size: See `Model.fit`.
+      workers: See `Model.fit`.
+      use_multiprocessing: See `Model.fit`.
+      model: The `Model` instance. Needed in order to correctly `build` the
+        `Model` using generator-like inputs (see `GeneratorDataAdapter`).
+      steps_per_execution: See `Model.compile`.
+      distribute: Whether to distribute the `tf.data.Dataset`.
+        `PreprocessingLayer.adapt` does not support distributed datasets and
+        sets this to `False`; `Model` should always set this to `True`.
+    """
 
     self._initial_epoch = initial_epoch
     self._epochs = epochs
@@ -1117,7 +1140,9 @@ class DataHandler(object):
         dataset = dataset.map(_make_class_weight_map_fn(class_weight))
       self._inferred_steps = self._infer_steps(steps_per_epoch, dataset)
 
-      if not _is_distributed_dataset(dataset):
+      # `PreprocessingLayer.adapt` does not currently support distributed
+      # datasets, so we pass `distribute=False` there.
+ if distribute and not _is_distributed_dataset(dataset): dataset = strategy.experimental_distribute_dataset(dataset) self._dataset = dataset diff --git a/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py b/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py index e5ba0bd1e17..8c7516bf9df 100644 --- a/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py +++ b/tensorflow/python/keras/layers/preprocessing/category_encoding_test.py @@ -753,7 +753,8 @@ class CategoryEncodingCombinerTest( _ = layer([1, 2, 3]) def test_saving_loading(self): - encoder = category_encoding.CategoryEncoding() + cls = get_layer_class() + encoder = cls() encoder.adapt([1, 2, 3]) model = keras.Sequential([encoder]) model.save("/tmp/model", save_format="tf") @@ -761,7 +762,8 @@ class CategoryEncodingCombinerTest( self.assertAllClose(model.predict([[1]]), loaded_model.predict([[1]])) def test_serialize(self): - encoder = category_encoding.CategoryEncoding() + cls = get_layer_class() + encoder = cls() encoder.adapt([1, 2, 3]) model = keras.Sequential([encoder]) _ = keras.models.clone_model(model) diff --git a/tensorflow/python/keras/layers/preprocessing/normalization_test.py b/tensorflow/python/keras/layers/preprocessing/normalization_test.py index f629b88f369..e7da7d9a0b6 100644 --- a/tensorflow/python/keras/layers/preprocessing/normalization_test.py +++ b/tensorflow/python/keras/layers/preprocessing/normalization_test.py @@ -276,46 +276,6 @@ class NormalizationTest(keras_parameterized.TestCase, mean=variables.Variable([1.0]), variance=variables.Variable([2.0])) - def test_mean_setting_continued_adapt_failure(self): - - if not context.executing_eagerly(): - self.skipTest("'assign' doesn't work in V1, so don't test in V1.") - - cls = get_layer_class() - layer = cls(axis=-1) - layer.build((None, 2)) - layer.mean.assign([1.3, 2.0]) - with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"): - layer.adapt(np.array([[1, 2]]), reset_state=False) - - def test_var_setting_continued_adapt_failure(self): - - if not context.executing_eagerly(): - self.skipTest("'assign' doesn't work in V1, so don't test in V1.") - - cls = get_layer_class() - layer = cls(axis=-1) - layer.build((None, 2)) - layer.variance.assign([1.3, 2.0]) - with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"): - layer.adapt(np.array([[1, 2]]), reset_state=False) - - def test_weight_setting_continued_adapt_failure(self): - cls = get_layer_class() - layer = cls(axis=-1) - layer.build((None, 2)) - layer.set_weights([np.array([1.3, 2.0]), np.array([0.0, 1.0]), np.array(0)]) - with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"): - layer.adapt(np.array([[1, 2]]), reset_state=False) - - def test_weight_setting_no_count_continued_adapt_failure(self): - cls = get_layer_class() - layer = cls(axis=-1) - layer.build((None, 2)) - layer.set_weights([np.array([1.3, 2.0]), np.array([0.0, 1.0])]) - with self.assertRaisesRegex(RuntimeError, "without also setting 'count'"): - layer.adapt(np.array([[1, 2]]), reset_state=False) - def test_1d_data(self): data = [0, 2, 0, 2] cls = get_layer_class() diff --git a/tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py b/tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py index 4e297e7c029..651908a8073 100644 --- a/tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py +++ b/tensorflow/python/keras/layers/preprocessing/preprocessing_stage.py @@ -29,8 +29,9 @@ from 
tensorflow.python.keras.utils import tf_utils from tensorflow.python.util import nest -class PreprocessingStage(base_preprocessing_layer.PreprocessingLayer, - sequential.Sequential): +# Sequential methods should take precedence. +class PreprocessingStage(sequential.Sequential, + base_preprocessing_layer.PreprocessingLayer): """A sequential preprocessing stage. This preprocessing stage wraps a list of preprocessing layers into a @@ -96,8 +97,9 @@ class PreprocessingStage(base_preprocessing_layer.PreprocessingLayer, reset_state=reset_state) -class FunctionalPreprocessingStage(base_preprocessing_layer.PreprocessingLayer, - functional.Functional): +# Functional methods shoud take precedence. +class FunctionalPreprocessingStage(functional.Functional, + base_preprocessing_layer.PreprocessingLayer): """A functional preprocessing stage. This preprocessing stage wraps a graph of preprocessing layers into a diff --git a/tensorflow/python/keras/layers/preprocessing/preprocessing_stage_test.py b/tensorflow/python/keras/layers/preprocessing/preprocessing_stage_test.py index 1cc48be1e3d..4a72319bd14 100644 --- a/tensorflow/python/keras/layers/preprocessing/preprocessing_stage_test.py +++ b/tensorflow/python/keras/layers/preprocessing/preprocessing_stage_test.py @@ -24,9 +24,6 @@ import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras.engine import base_preprocessing_layer -from tensorflow.python.keras.layers import convolutional -from tensorflow.python.keras.layers.preprocessing import image_preprocessing -from tensorflow.python.keras.layers.preprocessing import normalization from tensorflow.python.keras.layers.preprocessing import preprocessing_stage from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils from tensorflow.python.ops import array_ops @@ -86,20 +83,6 @@ class PreprocessingStageTest( with self.assertRaisesRegex(ValueError, 'requires a '): stage.adapt(None) - def test_mixing_preprocessing_and_regular_layers(self): - stage = preprocessing_stage.PreprocessingStage([ - image_preprocessing.CenterCrop(16, 16), - normalization.Normalization(), - convolutional.Conv2D(4, 3) - ]) - data = np.ones((16, 20, 20, 3), dtype='float32') - stage.adapt(data) - _ = stage(data) - stage.compile('rmsprop', 'mse') - stage.fit(data, np.ones((16, 14, 14, 4))) - _ = stage.evaluate(data, np.ones((16, 14, 14, 4))) - _ = stage.predict(data) - if __name__ == '__main__': test.main() diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-category-crossing.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-category-crossing.pbtxt index 1b5f2fc7a10..575e3e35f08 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-category-crossing.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-category-crossing.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], 
varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,14 +244,30 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "partial_crossing" argspec: "args=[\'self\', \'partial_inputs\', \'ragged_out\', \'sparse_out\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-category-encoding.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-category-encoding.pbtxt index 79946192d5b..3cf5dd904f0 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-category-encoding.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-category-encoding.pbtxt @@ -51,6 +51,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -95,6 +99,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -171,6 +179,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'count_weights\'], varargs=None, keywords=None, defaults=[\'None\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -187,6 +199,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, 
defaults=None" @@ -231,6 +247,18 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_num_elements" argspec: "args=[\'self\', \'num_elements\'], varargs=None, keywords=None, defaults=None" @@ -243,6 +271,10 @@ tf_class { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt index d09d0f85402..87cea69230c 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, 
defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-discretization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-discretization.pbtxt index 87c0e792cfd..ff4b67620c2 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-discretization.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-discretization.pbtxt @@ -53,6 +53,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -97,6 +101,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -139,7 +147,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -173,6 +181,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -189,6 +201,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -233,10 +249,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-hashing.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-hashing.pbtxt index dbd8d4fe10a..ac173d6cbe6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-hashing.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-hashing.pbtxt @@ -48,6 +48,10 @@ tf_class { 
name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt index 464ca87b9bc..f5b6eead417 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt @@ -53,6 +53,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -97,6 +101,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -173,6 +181,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -189,6 +201,10 @@ tf_class { name: "count_params" 
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -237,6 +253,18 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_vocabulary" argspec: "args=[\'self\', \'vocab\'], varargs=None, keywords=None, defaults=None" @@ -245,6 +273,10 @@ tf_class { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "vocab_size" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt index 1949b11b1ef..28705ec4868 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt @@ -51,6 +51,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -95,6 +99,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -171,6 +179,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -187,6 +199,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -231,10 +247,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: 
"args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt index 4e4faede9a2..f5ad8914df6 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt @@ -47,6 +47,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -91,6 +95,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -129,11 +137,11 @@ tf_class { } member_method { name: "__init__" - argspec: "args=[\'self\', \'trainable\', \'name\', \'dtype\', \'dynamic\'], varargs=None, keywords=kwargs, defaults=[\'True\', \'None\', \'None\', \'False\'], " + argspec: "args=[\'self\', \'stateful\', \'streaming\'], varargs=None, keywords=kwargs, defaults=[\'False\', \'True\'], " } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -167,6 +175,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -183,6 +195,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -227,10 +243,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt index f5c73972d6a..581f53d2163 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt index 55280e81038..82fc1918b35 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: 
"args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt index 69fb7bea570..8219f5b2171 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, 
defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt index 80d7fda8d0d..76c813930d4 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, 
keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt index 131bc4b3efd..3fce39b8c4f 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt 
b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt index c212112a64f..560c4caef3d 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt index de032c6566c..8ba03b5ec5d 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: 
"args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-zoom.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-zoom.pbtxt index ab55f40e8fc..13acf89033c 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-zoom.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-random-zoom.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, 
defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt index 96882e04598..3fa626ff348 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + 
member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt index 3dc758729eb..467b2863564 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt index 515f976574b..7f388e1a8eb 100644 --- 
a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt @@ -53,6 +53,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -97,6 +101,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -173,6 +181,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -189,6 +201,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -237,6 +253,18 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_vocabulary" argspec: "args=[\'self\', \'vocab\'], varargs=None, keywords=None, defaults=None" @@ -245,6 +273,10 @@ tf_class { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "vocab_size" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt index 69ae8d67722..42ef98a2c12 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt @@ -51,6 +51,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -95,6 +99,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -171,6 +179,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -187,6 +199,10 @@ tf_class { 
name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -235,6 +251,18 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_vocabulary" argspec: "args=[\'self\', \'vocab\', \'df_data\', \'oov_df_value\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " @@ -243,6 +271,10 @@ tf_class { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-category-crossing.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-category-crossing.pbtxt index 1b5f2fc7a10..575e3e35f08 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-category-crossing.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-category-crossing.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,14 +244,30 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, 
keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "partial_crossing" argspec: "args=[\'self\', \'partial_inputs\', \'ragged_out\', \'sparse_out\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-category-encoding.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-category-encoding.pbtxt index 80d7a618df8..68d9bec8071 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-category-encoding.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-category-encoding.pbtxt @@ -49,6 +49,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -93,6 +97,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -169,6 +177,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'count_weights\'], varargs=None, keywords=None, defaults=[\'None\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -185,6 +197,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -229,6 +245,18 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_num_elements" argspec: "args=[\'self\', \'num_elements\'], varargs=None, keywords=None, defaults=None" @@ -241,6 +269,10 @@ tf_class { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git 
a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt index d09d0f85402..87cea69230c 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-center-crop.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-discretization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-discretization.pbtxt index 87c0e792cfd..ff4b67620c2 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-discretization.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-discretization.pbtxt @@ -53,6 +53,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -97,6 +101,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ 
-139,7 +147,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -173,6 +181,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -189,6 +201,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -233,10 +249,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-hashing.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-hashing.pbtxt index dbd8d4fe10a..ac173d6cbe6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-hashing.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-hashing.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, 
defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt index cad7b1f8950..56e4f8cb09d 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt @@ -50,6 +50,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -94,6 +98,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -170,6 +178,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -186,6 +198,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -234,6 +250,18 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_vocabulary" argspec: "args=[\'self\', \'vocab\'], varargs=None, keywords=None, defaults=None" @@ -242,6 +270,10 @@ tf_class { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, 
keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "vocab_size" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt index 3d7a2621eee..3a02ca473e3 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-normalization.pbtxt @@ -49,6 +49,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -93,6 +97,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -135,7 +143,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -169,6 +177,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -185,6 +197,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -229,10 +245,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt index 4e4faede9a2..f5ad8914df6 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt +++ 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-preprocessing-layer.pbtxt @@ -47,6 +47,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -91,6 +95,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -129,11 +137,11 @@ tf_class { } member_method { name: "__init__" - argspec: "args=[\'self\', \'trainable\', \'name\', \'dtype\', \'dynamic\'], varargs=None, keywords=kwargs, defaults=[\'True\', \'None\', \'None\', \'False\'], " + argspec: "args=[\'self\', \'stateful\', \'streaming\'], varargs=None, keywords=kwargs, defaults=[\'False\', \'True\'], " } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -167,6 +175,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -183,6 +195,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -227,10 +243,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt index f5c73972d6a..581f53d2163 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-contrast.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ 
tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt index 55280e81038..82fc1918b35 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-crop.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', 
\'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt index 69fb7bea570..8219f5b2171 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-flip.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: 
"args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt index 80d7fda8d0d..76c813930d4 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-height.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt 
b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt index 131bc4b3efd..3fce39b8c4f 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-rotation.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt index c212112a64f..560c4caef3d 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-translation.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: 
"adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt index de032c6566c..8ba03b5ec5d 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-width.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, 
keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-zoom.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-zoom.pbtxt index ab55f40e8fc..13acf89033c 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-zoom.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-random-zoom.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'True\'], " } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], 
varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt index 96882e04598..3fa626ff348 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-rescaling.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt index 
3dc758729eb..467b2863564 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-resizing.pbtxt @@ -48,6 +48,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -92,6 +96,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -134,7 +142,7 @@ tf_class { } member_method { name: "adapt" - argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + argspec: "args=[\'self\', \'data\', \'batch_size\', \'steps\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\'], " } member_method { name: "add_loss" @@ -168,6 +176,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -184,6 +196,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -228,10 +244,26 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt index f90569c1475..2b5e804cb6b 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt @@ -50,6 +50,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -94,6 +98,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -170,6 +178,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', 
\'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -186,6 +198,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -234,6 +250,18 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_vocabulary" argspec: "args=[\'self\', \'vocab\'], varargs=None, keywords=None, defaults=None" @@ -242,6 +270,10 @@ tf_class { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "vocab_size" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt index c5eab66f364..3928501329d 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-text-vectorization.pbtxt @@ -49,6 +49,10 @@ tf_class { name: "input_spec" mtype: "" } + member { + name: "is_adapted" + mtype: "" + } member { name: "losses" mtype: "" @@ -93,6 +97,10 @@ tf_class { name: "stateful" mtype: "" } + member { + name: "streaming" + mtype: "" + } member { name: "submodules" mtype: "" @@ -169,6 +177,10 @@ tf_class { name: "call" argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "compile" + argspec: "args=[\'self\', \'run_eagerly\', \'steps_per_execution\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " + } member_method { name: "compute_mask" argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " @@ -185,6 +197,10 @@ tf_class { name: "count_params" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "finalize_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "from_config" argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" @@ -233,6 +249,18 @@ tf_class { name: "get_weights" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "make_adapt_function" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "merge_state" + argspec: "args=[\'self\', \'layers\'], varargs=None, keywords=None, defaults=None" + } + 
member_method { + name: "reset_state" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "set_vocabulary" argspec: "args=[\'self\', \'vocab\', \'df_data\', \'oov_df_value\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], " @@ -241,6 +269,10 @@ tf_class { name: "set_weights" argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "update_state" + argspec: "args=[\'self\', \'data\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "with_name_scope" argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None"
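
For readers skimming the golden-file churn above, here is a minimal usage sketch of the API surface those argspecs describe. This is not part of the patch: the corpus and variable names are invented for illustration, the printed values are not asserted, and it assumes a TensorFlow build that already includes this change. It only exercises members that appear in the goldens above (`adapt`, `is_adapted`, `streaming`); the new `batch_size`/`steps` arguments shown in the base-class argspec are left at their defaults here, since individual subclasses may still carry their own `adapt` signatures at this revision.

```python
# Illustrative sketch only -- not part of the patch.
import numpy as np
import tensorflow as tf

# TextVectorization is one of the adaptable layers whose golden file is
# updated above with the new `is_adapted` and `streaming` members.
layer = tf.keras.layers.experimental.preprocessing.TextVectorization()

# A toy corpus, invented for this example.
corpus = np.array(["the quick brown fox", "jumped over the lazy dog"])

# Fit the layer's state to the data. The base-class argspec recorded above
# also accepts `batch_size`, `steps`, and `reset_state`; defaults are used here.
layer.adapt(corpus)

# New read-only properties recorded in the goldens above.
print(layer.is_adapted)   # whether the layer has been fit to data
print(layer.streaming)    # whether adapt can accumulate over repeated calls

print(layer.get_vocabulary())
```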