diff --git a/tensorflow/python/keras/layers/__init__.py b/tensorflow/python/keras/layers/__init__.py index 36e58ef6552..8ce1c7d8224 100644 --- a/tensorflow/python/keras/layers/__init__.py +++ b/tensorflow/python/keras/layers/__init__.py @@ -47,19 +47,31 @@ if tf2.enabled(): from tensorflow.python.keras.layers.preprocessing.category_encoding import CategoryEncoding from tensorflow.python.keras.layers.preprocessing.category_encoding_v1 import CategoryEncoding as CategoryEncodingV1 CategoryEncodingV2 = CategoryEncoding + from tensorflow.python.keras.layers.preprocessing.integer_lookup import IntegerLookup + from tensorflow.python.keras.layers.preprocessing.integer_lookup_v1 import IntegerLookup as IntegerLookupV1 + IntegerLookupV2 = IntegerLookup from tensorflow.python.keras.layers.preprocessing.normalization import Normalization from tensorflow.python.keras.layers.preprocessing.normalization_v1 import Normalization as NormalizationV1 NormalizationV2 = Normalization + from tensorflow.python.keras.layers.preprocessing.string_lookup import StringLookup + from tensorflow.python.keras.layers.preprocessing.string_lookup_v1 import StringLookup as StringLookupV1 + StringLookupV2 = StringLookup from tensorflow.python.keras.layers.preprocessing.text_vectorization import TextVectorization from tensorflow.python.keras.layers.preprocessing.text_vectorization_v1 import TextVectorization as TextVectorizationV1 TextVectorizationV2 = TextVectorization else: + from tensorflow.python.keras.layers.preprocessing.integer_lookup_v1 import IntegerLookup + from tensorflow.python.keras.layers.preprocessing.integer_lookup import IntegerLookup as IntegerLookupV2 + IntegerLookupV1 = IntegerLookup from tensorflow.python.keras.layers.preprocessing.category_encoding_v1 import CategoryEncoding from tensorflow.python.keras.layers.preprocessing.category_encoding import CategoryEncoding as CategoryEncodingV2 CategoryEncodingV1 = CategoryEncoding from tensorflow.python.keras.layers.preprocessing.normalization_v1 import Normalization from tensorflow.python.keras.layers.preprocessing.normalization import Normalization as NormalizationV2 NormalizationV1 = Normalization + from tensorflow.python.keras.layers.preprocessing.string_lookup_v1 import StringLookup + from tensorflow.python.keras.layers.preprocessing.string_lookup import StringLookup as StringLookupV2 + StringLookupV1 = StringLookup from tensorflow.python.keras.layers.preprocessing.text_vectorization_v1 import TextVectorization from tensorflow.python.keras.layers.preprocessing.text_vectorization import TextVectorization as TextVectorizationV2 TextVectorizationV1 = TextVectorization diff --git a/tensorflow/python/keras/layers/preprocessing/integer_lookup.py b/tensorflow/python/keras/layers/preprocessing/integer_lookup.py index 6f497983408..3512b9988c1 100644 --- a/tensorflow/python/keras/layers/preprocessing/integer_lookup.py +++ b/tensorflow/python/keras/layers/preprocessing/integer_lookup.py @@ -20,8 +20,10 @@ from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.keras.layers.preprocessing import index_lookup from tensorflow.python.keras.layers.preprocessing import table_utils +from tensorflow.python.util.tf_export import keras_export +@keras_export("keras.layers.experimental.preprocessing.IntegerLookup", v1=[]) class IntegerLookup(index_lookup.IndexLookup): """Maps integers from a vocabulary to integer indices. 
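A minimal, self-contained sketch of the V1/V2 aliasing pattern used in the __init__.py hunk above; the flag and the two classes are placeholders, not real TensorFlow imports.

    # Sketch of the conditional aliasing: `tf2_enabled` stands in for
    # tensorflow.python.tf2.enabled(), and the classes stand in for the
    # integer_lookup_v1 / integer_lookup implementations.
    tf2_enabled = True

    class _IntegerLookupV1Impl:  # placeholder for integer_lookup_v1.IntegerLookup
        pass

    class _IntegerLookupV2Impl:  # placeholder for integer_lookup.IntegerLookup
        pass

    if tf2_enabled:
        IntegerLookup = _IntegerLookupV2Impl    # bare name tracks the active API
        IntegerLookupV1 = _IntegerLookupV1Impl
        IntegerLookupV2 = IntegerLookup
    else:
        IntegerLookup = _IntegerLookupV1Impl
        IntegerLookupV2 = _IntegerLookupV2Impl
        IntegerLookupV1 = IntegerLookup

    # Either way, IntegerLookupV1 and IntegerLookupV2 are both importable, and the
    # unqualified IntegerLookup resolves to the implementation matching the TF mode.
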
@@ -39,18 +41,18 @@ class IntegerLookup(index_lookup.IndexLookup): Attributes: max_values: The maximum size of the vocabulary for this layer. If None, there is no cap on the size of the vocabulary. Note that this vocabulary - includes the OOV and mask tokens, so the effective number of tokens is - (max_tokens - num_oov_tokens - (1 if mask_token else 0)) + includes the OOV and mask values, so the effective number of values is + (max_values - num_oov_indices - (1 if mask_value else 0)) num_oov_indices: The number of out-of-vocabulary values to use; defaults to - 1. If this value is more than 1, OOV inputs are hashed to determine their - OOV value; if this value is 0, passing an OOV input will result in a '-1' - being returned for that value in the output tensor. (Note that, because - the value is -1 and not 0, this will allow you to effectively drop OOV - values from categorical encodings.) + 1. If this value is more than 1, OOV inputs are modulated to determine + their OOV value; if this value is 0, passing an OOV input will result in + a '-1' being returned for that value in the output tensor. (Note that, + because the value is -1 and not 0, this will allow you to effectively drop + OOV values from categorical encodings.) mask_value: A value that represents masked inputs, and which is mapped to index 0. Defaults to 0. If set to None, no mask term will be added and the - OOV tokens, if any, will be indexed from (0...num_oov_tokens) instead of - (1...num_oov_tokens+1). + OOV values, if any, will be indexed from (0...num_oov_indices) instead of + (1...num_oov_indices+1). oov_value: The value representing an out-of-vocabulary value. Defaults to -1. vocabulary: An optional list of values, or a path to a text file containing @@ -87,7 +89,7 @@ class IntegerLookup(index_lookup.IndexLookup): [0, -1, 42, 1138, 1000, 36, 12] Note how the mask value 0 and the OOV value -1 have been added to the - vocabulary. The remaining tokens are sorted by frequency (1138, which has + vocabulary. The remaining values are sorted by frequency (1138, which has 2 occurrences, is first) then by inverse sort order. >>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]]) @@ -99,6 +101,27 @@ class IntegerLookup(index_lookup.IndexLookup): [2, 4, 5]])> + Lookups with multiple OOV values. + + This example demonstrates how to use a lookup layer with multiple OOV values. + When a layer is created with more than one OOV value, any OOV values are + hashed into the number of OOV buckets, distributing OOV values in a + deterministic fashion across the set. + + >>> vocab = [12, 36, 1138, 42] + >>> data = tf.constant([[12, 1138, 42], [37, 1000, 36]]) + >>> layer = IntegerLookup(vocabulary=vocab, num_oov_indices=2) + >>> layer(data) + <tf.Tensor: shape=(2, 3), dtype=int64, numpy= + array([[3, 5, 6], + [2, 1, 4]])> + + Note that the output for OOV value 37 is 2, while the output for OOV value + 1000 is 1. The in-vocab values have their output index increased by 1 from + earlier examples (12 maps to 3, etc) in order to make space for the extra OOV + value. + + Inverse lookup This example demonstrates how to map indices to values using this layer.
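The multiple-OOV-bucket behaviour documented above can be exercised end to end with the short sketch below; it assumes a TensorFlow build that already contains this change (the layer exported as tf.keras.layers.experimental.preprocessing.IntegerLookup), and the expected values are taken from the docstring's note.

    # Exercises the multiple-OOV-bucket example from the IntegerLookup docstring.
    import tensorflow as tf

    vocab = [12, 36, 1138, 42]
    data = tf.constant([[12, 1138, 42], [37, 1000, 36]])

    # Index layout: 0 = mask value (0), 1-2 = the two OOV buckets, 3.. = vocab order.
    layer = tf.keras.layers.experimental.preprocessing.IntegerLookup(
        vocabulary=vocab, num_oov_indices=2)

    print(layer(data))  # per the docstring: [[3, 5, 6], [2, 1, 4]]
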
(You diff --git a/tensorflow/python/keras/layers/preprocessing/integer_lookup_v1.py b/tensorflow/python/keras/layers/preprocessing/integer_lookup_v1.py index ec326f4d78b..2a86e9d56b0 100644 --- a/tensorflow/python/keras/layers/preprocessing/integer_lookup_v1.py +++ b/tensorflow/python/keras/layers/preprocessing/integer_lookup_v1.py @@ -19,7 +19,9 @@ from __future__ import print_function from tensorflow.python.keras.layers.preprocessing import index_lookup_v1 from tensorflow.python.keras.layers.preprocessing import integer_lookup +from tensorflow.python.util.tf_export import keras_export +@keras_export(v1=["keras.layers.experimental.preprocessing.IntegerLookup"]) class IntegerLookup(integer_lookup.IntegerLookup, index_lookup_v1.IndexLookup): pass diff --git a/tensorflow/python/keras/layers/preprocessing/string_lookup.py b/tensorflow/python/keras/layers/preprocessing/string_lookup.py index a420de8678a..d772f57aa4d 100644 --- a/tensorflow/python/keras/layers/preprocessing/string_lookup.py +++ b/tensorflow/python/keras/layers/preprocessing/string_lookup.py @@ -20,8 +20,10 @@ from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.keras.layers.preprocessing import index_lookup from tensorflow.python.keras.layers.preprocessing import table_utils +from tensorflow.python.util.tf_export import keras_export +@keras_export("keras.layers.experimental.preprocessing.StringLookup", v1=[]) class StringLookup(index_lookup.IndexLookup): """Maps strings from a vocabulary to integer indices. @@ -52,7 +54,7 @@ class StringLookup(index_lookup.IndexLookup): will be added and the OOV tokens, if any, will be indexed from (0...num_oov_indices) instead of (1...num_oov_indices+1). oov_token: The token representing an out-of-vocabulary value. Defaults to - "[OOV]". + "[UNK]". vocabulary: An optional list of vocabulary terms, or a path to a text file containing a vocabulary to load into this layer. The file should contain one token per line. If the list or file contains the same token multiple @@ -85,9 +87,9 @@ class StringLookup(index_lookup.IndexLookup): >>> layer = StringLookup() >>> layer.adapt(data) >>> layer.get_vocabulary() - ['', '[OOV]', 'd', 'z', 'c', 'b', 'a'] + ['', '[UNK]', 'd', 'z', 'c', 'b', 'a'] - Note how the mask token '' and the OOV token [OOV] have been added to the + Note how the mask token '' and the OOV token [UNK] have been added to the vocabulary. The remaining tokens are sorted by frequency ('d', which has 2 occurrences, is first) then by inverse sort order. @@ -99,6 +101,25 @@ class StringLookup(index_lookup.IndexLookup): array([[6, 4, 2], [2, 3, 5]])> + Lookups with multiple OOV tokens. + + This example demonstrates how to use a lookup layer with multiple OOV tokens. + When a layer is created with more than one OOV token, any OOV values are + hashed into the number of OOV buckets, distributing OOV values in a + deterministic fashion across the set. + + >>> vocab = ["a", "b", "c", "d"] + >>> data = tf.constant([["a", "c", "d"], ["m", "z", "b"]]) + >>> layer = StringLookup(vocabulary=vocab, num_oov_indices=2) + >>> layer(data) + <tf.Tensor: shape=(2, 3), dtype=int64, numpy= + array([[3, 5, 6], + [1, 2, 4]])> + + Note that the output for OOV value 'm' is 1, while the output for OOV value + 'z' is 2. The in-vocab terms have their output index increased by 1 from + earlier examples (a maps to 3, etc) in order to make space for the extra OOV + value.
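As a concrete companion to the string_lookup.py docstring changes above (the new "[UNK]" default OOV token and vocabulary-from-file support), here is a hedged sketch; it assumes a TensorFlow build that already contains this change, and the temp file and data are purely illustrative.

    # Builds a StringLookup from a vocabulary file and shows the new "[UNK]" OOV
    # token in get_vocabulary(). Sketch only; expected outputs are indicative.
    import tempfile
    import tensorflow as tf

    with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f:
        f.write("a\nb\nc\nd\n")           # one token per line, as the docstring requires
        vocab_path = f.name

    layer = tf.keras.layers.experimental.preprocessing.StringLookup(
        vocabulary=vocab_path)

    print(layer.get_vocabulary())            # expected: ['', '[UNK]', 'a', 'b', 'c', 'd']
    print(layer(tf.constant([["a", "z"]])))  # 'z' is OOV and maps to the OOV index 1
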
Inverse lookup @@ -112,7 +133,7 @@ class StringLookup(index_lookup.IndexLookup): >>> layer(data) <tf.Tensor: shape=(2, 3), dtype=string, numpy= array([[b'a', b'c', b'd'], - [b'd', b'[OOV]', b'b']], dtype=object)> + [b'd', b'[UNK]', b'b']], dtype=object)> Note that the integer 5, which is out of the vocabulary space, returns an OOV token. @@ -131,9 +152,9 @@ class StringLookup(index_lookup.IndexLookup): >>> i_layer(int_data) <tf.Tensor: shape=(2, 3), dtype=string, numpy= array([[b'a', b'c', b'd'], - [b'd', b'[OOV]', b'b']], dtype=object)> + [b'd', b'[UNK]', b'b']], dtype=object)> - In this example, the input value 'z' resulted in an output of '[OOV]', since + In this example, the input value 'z' resulted in an output of '[UNK]', since 'z' was not in the vocabulary - it got represented as an OOV, and all OOV values are returned as '[UNK]' in the inverse layer. Also, note that for the inverse to work, you must have already set the forward layer vocabulary @@ -144,9 +165,9 @@ class StringLookup(index_lookup.IndexLookup): max_tokens=None, num_oov_indices=1, mask_token="", - oov_token="[OOV]", + oov_token="[UNK]", vocabulary=None, - encoding="utf-8", + encoding=None, invert=False, **kwargs): allowed_dtypes = [dtypes.string] @@ -158,6 +179,9 @@ class StringLookup(index_lookup.IndexLookup): if "dtype" not in kwargs: kwargs["dtype"] = dtypes.string + if encoding is None: + encoding = "utf-8" + if vocabulary is not None: if isinstance(vocabulary, str): vocabulary = table_utils.get_vocabulary_from_file(vocabulary, encoding) diff --git a/tensorflow/python/keras/layers/preprocessing/string_lookup_test.py b/tensorflow/python/keras/layers/preprocessing/string_lookup_test.py index 0b9081d815c..2b45b59fcf4 100644 --- a/tensorflow/python/keras/layers/preprocessing/string_lookup_test.py +++ b/tensorflow/python/keras/layers/preprocessing/string_lookup_test.py @@ -36,6 +36,7 @@ from tensorflow.python.keras.layers.preprocessing import string_lookup from tensorflow.python.keras.layers.preprocessing import string_lookup_v1 from tensorflow.python.keras.saving import save from tensorflow.python.keras.utils.generic_utils import CustomObjectScope +from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import gfile from tensorflow.python.platform import test @@ -155,7 +156,7 @@ class StringLookupVocabularyTest(keras_parameterized.TestCase, def test_get_vocab_returns_str(self): vocab_data = ["earth", "wind", "and", "fire"] - expected_vocab = ["", "[OOV]", "earth", "wind", "and", "fire"] + expected_vocab = ["", "[UNK]", "earth", "wind", "and", "fire"] layer = get_layer_class()(vocabulary=vocab_data) layer_vocab = layer.get_vocabulary() self.assertAllEqual(expected_vocab, layer_vocab) @@ -205,7 +206,7 @@ class StringLookupVocabularyTest(keras_parameterized.TestCase, input_array = np.array([["earth", "wind", "and", "fire"], ["fire", "and", "earth", "michigan"]]) expected_output = np.array([["earth", "wind", "and", "fire"], - ["fire", "and", "earth", "[OOV]"]]) + ["fire", "and", "earth", "[UNK]"]]) input_data = keras.Input(shape=(None,), dtype=dtypes.string) layer = get_layer_class()(vocabulary=vocab_data) @@ -217,6 +218,21 @@ class StringLookupVocabularyTest(keras_parameterized.TestCase, output_dataset = model.predict(input_array) self.assertAllEqual(expected_output, output_dataset) + def test_ragged_string_input_multi_bucket(self): + vocab_data = ["earth", "wind", "and", "fire"] + input_array = ragged_factory_ops.constant([["earth", "wind", "fire"], + ["fire", "and", "earth", + "ohio"]]) + expected_output = [[3, 4, 6], [6, 5, 3, 2]] + + input_data = keras.Input(shape=(None,), dtype=dtypes.string, ragged=True) + layer = get_layer_class()(num_oov_indices=2) + layer.set_vocabulary(vocab_data) + int_data =
layer(input_data) + model = keras.Model(inputs=input_data, outputs=int_data) + output_dataset = model.predict(input_array) + self.assertAllEqual(expected_output, output_dataset) + @keras_parameterized.run_all_keras_modes(always_skip_eager=True) class StringLookupSaveableTest(keras_parameterized.TestCase, diff --git a/tensorflow/python/keras/layers/preprocessing/string_lookup_v1.py b/tensorflow/python/keras/layers/preprocessing/string_lookup_v1.py index 0d4c70de655..3b5d0679372 100644 --- a/tensorflow/python/keras/layers/preprocessing/string_lookup_v1.py +++ b/tensorflow/python/keras/layers/preprocessing/string_lookup_v1.py @@ -19,7 +19,9 @@ from __future__ import print_function from tensorflow.python.keras.layers.preprocessing import index_lookup_v1 from tensorflow.python.keras.layers.preprocessing import string_lookup +from tensorflow.python.util.tf_export import keras_export +@keras_export(v1=["keras.layers.experimental.preprocessing.StringLookup"]) class StringLookup(string_lookup.StringLookup, index_lookup_v1.IndexLookup): pass diff --git a/tensorflow/python/keras/layers/serialization.py b/tensorflow/python/keras/layers/serialization.py index 6b58a08a4bf..d990f2075c8 100644 --- a/tensorflow/python/keras/layers/serialization.py +++ b/tensorflow/python/keras/layers/serialization.py @@ -51,8 +51,12 @@ from tensorflow.python.keras.layers.preprocessing import category_encoding_v1 from tensorflow.python.keras.layers.preprocessing import discretization from tensorflow.python.keras.layers.preprocessing import hashing from tensorflow.python.keras.layers.preprocessing import image_preprocessing +from tensorflow.python.keras.layers.preprocessing import integer_lookup as preprocessing_integer_lookup +from tensorflow.python.keras.layers.preprocessing import integer_lookup_v1 as preprocessing_integer_lookup_v1 from tensorflow.python.keras.layers.preprocessing import normalization as preprocessing_normalization from tensorflow.python.keras.layers.preprocessing import normalization_v1 as preprocessing_normalization_v1 +from tensorflow.python.keras.layers.preprocessing import string_lookup as preprocessing_string_lookup +from tensorflow.python.keras.layers.preprocessing import string_lookup_v1 as preprocessing_string_lookup_v1 from tensorflow.python.keras.layers.preprocessing import text_vectorization as preprocessing_text_vectorization from tensorflow.python.keras.layers.preprocessing import text_vectorization_v1 as preprocessing_text_vectorization_v1 from tensorflow.python.keras.utils import generic_utils @@ -63,11 +67,13 @@ from tensorflow.python.util.tf_export import keras_export ALL_MODULES = (base_layer, input_layer, advanced_activations, convolutional, convolutional_recurrent, core, cudnn_recurrent, dense_attention, embeddings, einsum_dense, local, merge, noise, normalization, - pooling, image_preprocessing, preprocessing_normalization_v1, + pooling, image_preprocessing, preprocessing_integer_lookup_v1, + preprocessing_normalization_v1, preprocessing_string_lookup_v1, preprocessing_text_vectorization_v1, recurrent, wrappers, hashing, category_crossing, category_encoding_v1, discretization) ALL_V2_MODULES = (rnn_cell_wrapper_v2, normalization_v2, recurrent_v2, - preprocessing_normalization, preprocessing_text_vectorization, + preprocessing_integer_lookup, preprocessing_normalization, + preprocessing_string_lookup, preprocessing_text_vectorization, category_encoding) # ALL_OBJECTS is meant to be a global mutable. Hence we need to make it # thread-local to avoid concurrent mutations. 
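The serialization.py registration above is what lets the generic layer (de)serialization helpers resolve the new classes by name. A minimal sketch of the round trip, assuming this change is present; the config contents shown in the comment are indicative, not exact.

    # Round-trips a StringLookup through serialize/deserialize; resolving the
    # 'StringLookup' class name relies on the module registration above.
    import tensorflow as tf

    layer = tf.keras.layers.experimental.preprocessing.StringLookup(
        vocabulary=["a", "b", "c"])

    config = tf.keras.layers.serialize(layer)      # e.g. {'class_name': 'StringLookup', 'config': {...}}
    restored = tf.keras.layers.deserialize(config)

    print(type(restored).__name__)                 # StringLookup
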
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.__metaclass__.pbtxt new file mode 100644 index 00000000000..409509cd4d2 --- /dev/null +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.__metaclass__.pbtxt @@ -0,0 +1,14 @@ +path: "tensorflow.keras.layers.experimental.preprocessing.IntegerLookup.__metaclass__" +tf_class { + is_instance: "" + member_method { + name: "__init__" + } + member_method { + name: "mro" + } + member_method { + name: "register" + argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt new file mode 100644 index 00000000000..c0052764039 --- /dev/null +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt @@ -0,0 +1,240 @@ +path: "tensorflow.keras.layers.experimental.preprocessing.IntegerLookup" +tf_class { + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + member { + name: "activity_regularizer" + mtype: "" + } + member { + name: "dtype" + mtype: "" + } + member { + name: "dynamic" + mtype: "" + } + member { + name: "inbound_nodes" + mtype: "" + } + member { + name: "input" + mtype: "" + } + member { + name: "input_mask" + mtype: "" + } + member { + name: "input_shape" + mtype: "" + } + member { + name: "input_spec" + mtype: "" + } + member { + name: "losses" + mtype: "" + } + member { + name: "metrics" + mtype: "" + } + member { + name: "name" + mtype: "" + } + member { + name: "name_scope" + mtype: "" + } + member { + name: "non_trainable_variables" + mtype: "" + } + member { + name: "non_trainable_weights" + mtype: "" + } + member { + name: "outbound_nodes" + mtype: "" + } + member { + name: "output" + mtype: "" + } + member { + name: "output_mask" + mtype: "" + } + member { + name: "output_shape" + mtype: "" + } + member { + name: "stateful" + mtype: "" + } + member { + name: "submodules" + mtype: "" + } + member { + name: "trainable" + mtype: "" + } + member { + name: "trainable_variables" + mtype: "" + } + member { + name: "trainable_weights" + mtype: "" + } + member { + name: "updates" + mtype: "" + } + member { + name: "variables" + mtype: "" + } + member { + name: "weights" + mtype: "" + } + member_method { + name: "__init__" + argspec: "args=[\'self\', \'max_values\', \'num_oov_indices\', \'mask_value\', \'oov_value\', \'vocabulary\', \'invert\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'1\', \'0\', \'-1\', \'None\', \'False\'], " + } + member_method { + name: "adapt" + argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + } + member_method { + name: "add_loss" + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" + } + member_method { + name: "add_metric" + argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'None\'], " + } + member_method { + name: "add_update" + argspec: "args=[\'self\', \'updates\', 
\'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + } + member_method { + name: "add_variable" + argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None" + } + member_method { + name: "add_weight" + argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], " + } + member_method { + name: "apply" + argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None" + } + member_method { + name: "build" + argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "call" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "compute_mask" + argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " + } + member_method { + name: "compute_output_shape" + argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "compute_output_signature" + argspec: "args=[\'self\', \'input_spec\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "count_params" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_mask_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_shape_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_losses_for" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_mask_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_shape_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_updates_for" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_vocabulary" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_weights" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "set_vocabulary" + argspec: "args=[\'self\', \'vocab\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "set_weights" + argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "vocab_size" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "with_name_scope" 
+ argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.__metaclass__.pbtxt new file mode 100644 index 00000000000..4cb57350380 --- /dev/null +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.__metaclass__.pbtxt @@ -0,0 +1,14 @@ +path: "tensorflow.keras.layers.experimental.preprocessing.StringLookup.__metaclass__" +tf_class { + is_instance: "" + member_method { + name: "__init__" + } + member_method { + name: "mro" + } + member_method { + name: "register" + argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt new file mode 100644 index 00000000000..1deb932cb4e --- /dev/null +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt @@ -0,0 +1,240 @@ +path: "tensorflow.keras.layers.experimental.preprocessing.StringLookup" +tf_class { + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + member { + name: "activity_regularizer" + mtype: "" + } + member { + name: "dtype" + mtype: "" + } + member { + name: "dynamic" + mtype: "" + } + member { + name: "inbound_nodes" + mtype: "" + } + member { + name: "input" + mtype: "" + } + member { + name: "input_mask" + mtype: "" + } + member { + name: "input_shape" + mtype: "" + } + member { + name: "input_spec" + mtype: "" + } + member { + name: "losses" + mtype: "" + } + member { + name: "metrics" + mtype: "" + } + member { + name: "name" + mtype: "" + } + member { + name: "name_scope" + mtype: "" + } + member { + name: "non_trainable_variables" + mtype: "" + } + member { + name: "non_trainable_weights" + mtype: "" + } + member { + name: "outbound_nodes" + mtype: "" + } + member { + name: "output" + mtype: "" + } + member { + name: "output_mask" + mtype: "" + } + member { + name: "output_shape" + mtype: "" + } + member { + name: "stateful" + mtype: "" + } + member { + name: "submodules" + mtype: "" + } + member { + name: "trainable" + mtype: "" + } + member { + name: "trainable_variables" + mtype: "" + } + member { + name: "trainable_weights" + mtype: "" + } + member { + name: "updates" + mtype: "" + } + member { + name: "variables" + mtype: "" + } + member { + name: "weights" + mtype: "" + } + member_method { + name: "__init__" + argspec: "args=[\'self\', \'max_tokens\', \'num_oov_indices\', \'mask_token\', \'oov_token\', \'vocabulary\', \'encoding\', \'invert\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'1\', \'\', \'[UNK]\', \'None\', \'None\', \'False\'], " + } + member_method { + name: "adapt" + argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + } + member_method { + name: "add_loss" + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" + } + member_method { + name: "add_metric" + argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=kwargs, 
defaults=[\'None\'], " + } + member_method { + name: "add_update" + argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + } + member_method { + name: "add_variable" + argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None" + } + member_method { + name: "add_weight" + argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], " + } + member_method { + name: "apply" + argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None" + } + member_method { + name: "build" + argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "call" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "compute_mask" + argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " + } + member_method { + name: "compute_output_shape" + argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "compute_output_signature" + argspec: "args=[\'self\', \'input_spec\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "count_params" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_mask_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_shape_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_losses_for" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_mask_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_shape_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_updates_for" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_vocabulary" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_weights" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "set_vocabulary" + argspec: "args=[\'self\', \'vocab\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "set_weights" + argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "vocab_size" + argspec: 
"args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "with_name_scope" + argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.pbtxt index 4a0522dc08f..94f6e3ba990 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.keras.layers.experimental.preprocessing.pbtxt @@ -20,6 +20,10 @@ tf_module { name: "Hashing" mtype: "" } + member { + name: "IntegerLookup" + mtype: "" + } member { name: "Normalization" mtype: "" @@ -68,6 +72,10 @@ tf_module { name: "Resizing" mtype: "" } + member { + name: "StringLookup" + mtype: "" + } member { name: "TextVectorization" mtype: "" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.__metaclass__.pbtxt new file mode 100644 index 00000000000..409509cd4d2 --- /dev/null +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.__metaclass__.pbtxt @@ -0,0 +1,14 @@ +path: "tensorflow.keras.layers.experimental.preprocessing.IntegerLookup.__metaclass__" +tf_class { + is_instance: "" + member_method { + name: "__init__" + } + member_method { + name: "mro" + } + member_method { + name: "register" + argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt new file mode 100644 index 00000000000..dedb0f3073f --- /dev/null +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-integer-lookup.pbtxt @@ -0,0 +1,237 @@ +path: "tensorflow.keras.layers.experimental.preprocessing.IntegerLookup" +tf_class { + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + member { + name: "activity_regularizer" + mtype: "" + } + member { + name: "dtype" + mtype: "" + } + member { + name: "dynamic" + mtype: "" + } + member { + name: "inbound_nodes" + mtype: "" + } + member { + name: "input" + mtype: "" + } + member { + name: "input_mask" + mtype: "" + } + member { + name: "input_shape" + mtype: "" + } + member { + name: "input_spec" + mtype: "" + } + member { + name: "losses" + mtype: "" + } + member { + name: "metrics" + mtype: "" + } + member { + name: "name" + mtype: "" + } + member { + name: "name_scope" + mtype: "" + } + member { + name: "non_trainable_variables" + mtype: "" + } + member { + name: "non_trainable_weights" + mtype: "" + } + member { + name: "outbound_nodes" + mtype: "" + } + member { + name: "output" + mtype: "" + } + member { + name: "output_mask" + mtype: "" + } + member { + name: "output_shape" + mtype: "" + } + member { + name: "stateful" + mtype: "" + } + member { + name: "submodules" + mtype: "" + } + member { + name: "trainable" + mtype: "" + } + member { + name: "trainable_variables" + mtype: "" + } + member { + name: "trainable_weights" + mtype: "" + } + member { + name: "updates" + 
mtype: "" + } + member { + name: "variables" + mtype: "" + } + member { + name: "weights" + mtype: "" + } + member_method { + name: "__init__" + argspec: "args=[\'self\', \'max_values\', \'num_oov_indices\', \'mask_value\', \'oov_value\', \'vocabulary\', \'invert\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'1\', \'0\', \'-1\', \'None\', \'False\'], " + } + member_method { + name: "adapt" + argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + } + member_method { + name: "add_loss" + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" + } + member_method { + name: "add_metric" + argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'None\'], " + } + member_method { + name: "add_update" + argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + } + member_method { + name: "add_variable" + argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None" + } + member_method { + name: "add_weight" + argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], " + } + member_method { + name: "apply" + argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None" + } + member_method { + name: "build" + argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "call" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "compute_mask" + argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " + } + member_method { + name: "compute_output_shape" + argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "compute_output_signature" + argspec: "args=[\'self\', \'input_spec\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "count_params" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_mask_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_shape_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_losses_for" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_mask_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_shape_at" + argspec: 
"args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_updates_for" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_vocabulary" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_weights" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "set_vocabulary" + argspec: "args=[\'self\', \'vocab\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "set_weights" + argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "vocab_size" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "with_name_scope" + argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.__metaclass__.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.__metaclass__.pbtxt new file mode 100644 index 00000000000..4cb57350380 --- /dev/null +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.__metaclass__.pbtxt @@ -0,0 +1,14 @@ +path: "tensorflow.keras.layers.experimental.preprocessing.StringLookup.__metaclass__" +tf_class { + is_instance: "" + member_method { + name: "__init__" + } + member_method { + name: "mro" + } + member_method { + name: "register" + argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt new file mode 100644 index 00000000000..b419e779c48 --- /dev/null +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.-string-lookup.pbtxt @@ -0,0 +1,237 @@ +path: "tensorflow.keras.layers.experimental.preprocessing.StringLookup" +tf_class { + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + is_instance: "" + member { + name: "activity_regularizer" + mtype: "" + } + member { + name: "dtype" + mtype: "" + } + member { + name: "dynamic" + mtype: "" + } + member { + name: "inbound_nodes" + mtype: "" + } + member { + name: "input" + mtype: "" + } + member { + name: "input_mask" + mtype: "" + } + member { + name: "input_shape" + mtype: "" + } + member { + name: "input_spec" + mtype: "" + } + member { + name: "losses" + mtype: "" + } + member { + name: "metrics" + mtype: "" + } + member { + name: "name" + mtype: "" + } + member { + name: "name_scope" + mtype: "" + } + member { + name: "non_trainable_variables" + mtype: "" + } + member { + name: "non_trainable_weights" + mtype: "" + } + member { + name: "outbound_nodes" + mtype: "" + } + member { + name: "output" + mtype: "" + } + member { + name: "output_mask" + mtype: "" + } + member { + name: "output_shape" + mtype: "" + } + member { + name: "stateful" + mtype: "" + } + member { + name: "submodules" + mtype: "" + } + member { + name: "trainable" + mtype: "" + } + member { + name: "trainable_variables" + mtype: "" + } + member { + name: "trainable_weights" + mtype: "" + } 
+ member { + name: "updates" + mtype: "" + } + member { + name: "variables" + mtype: "" + } + member { + name: "weights" + mtype: "" + } + member_method { + name: "__init__" + argspec: "args=[\'self\', \'max_tokens\', \'num_oov_indices\', \'mask_token\', \'oov_token\', \'vocabulary\', \'encoding\', \'invert\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'1\', \'\', \'[UNK]\', \'None\', \'None\', \'False\'], " + } + member_method { + name: "adapt" + argspec: "args=[\'self\', \'data\', \'reset_state\'], varargs=None, keywords=None, defaults=[\'True\'], " + } + member_method { + name: "add_loss" + argspec: "args=[\'self\', \'losses\'], varargs=None, keywords=kwargs, defaults=None" + } + member_method { + name: "add_metric" + argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'None\'], " + } + member_method { + name: "add_update" + argspec: "args=[\'self\', \'updates\', \'inputs\'], varargs=None, keywords=None, defaults=[\'None\'], " + } + member_method { + name: "add_variable" + argspec: "args=[\'self\'], varargs=args, keywords=kwargs, defaults=None" + } + member_method { + name: "add_weight" + argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'regularizer\', \'trainable\', \'constraint\', \'partitioner\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], " + } + member_method { + name: "apply" + argspec: "args=[\'self\', \'inputs\'], varargs=args, keywords=kwargs, defaults=None" + } + member_method { + name: "build" + argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "call" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "compute_mask" + argspec: "args=[\'self\', \'inputs\', \'mask\'], varargs=None, keywords=None, defaults=[\'None\'], " + } + member_method { + name: "compute_output_shape" + argspec: "args=[\'self\', \'input_shape\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "compute_output_signature" + argspec: "args=[\'self\', \'input_spec\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "count_params" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_mask_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_input_shape_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_losses_for" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_output_mask_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + 
member_method { + name: "get_output_shape_at" + argspec: "args=[\'self\', \'node_index\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_updates_for" + argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_vocabulary" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_weights" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "set_vocabulary" + argspec: "args=[\'self\', \'vocab\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "set_weights" + argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "vocab_size" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "with_name_scope" + argspec: "args=[\'cls\', \'method\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.pbtxt index 4a0522dc08f..94f6e3ba990 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.keras.layers.experimental.preprocessing.pbtxt @@ -20,6 +20,10 @@ tf_module { name: "Hashing" mtype: "" } + member { + name: "IntegerLookup" + mtype: "" + } member { name: "Normalization" mtype: "" @@ -68,6 +72,10 @@ tf_module { name: "Resizing" mtype: "" } + member { + name: "StringLookup" + mtype: "" + } member { name: "TextVectorization" mtype: ""
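The golden .pbtxt updates above record that the two layers are now part of the public experimental preprocessing namespace in both the v1 and v2 APIs. A quick, hedged sanity check mirroring what the goldens assert, on a build that includes this change:

    # Confirms the new symbols are exported; expected to print True for both names.
    import tensorflow as tf

    preprocessing = tf.keras.layers.experimental.preprocessing
    for name in ("IntegerLookup", "StringLookup"):
        print(name, hasattr(preprocessing, name))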