Move the init file content to API gen build rule.

The final leftover piece is keras.layers.

PiperOrigin-RevId: 276390172
Change-Id: Ie2efc73e9b987df15fb8085f82b8369eca8ce664
This commit is contained in:
Scott Zhu 2019-10-23 17:53:45 -07:00 committed by TensorFlower Gardener
parent 5ca25fda42
commit ef15e65bca
8 changed files with 83 additions and 84 deletions

View File

@ -23,10 +23,7 @@ from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.keras import estimator
from tensorflow.python.keras import layers
from tensorflow.python.keras import premade
from tensorflow.python.keras import preprocessing
from tensorflow.python.keras.layers import Input
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.models import Sequential

View File

@ -37,12 +37,18 @@ keras_packages = [
"tensorflow.python.keras.datasets.imdb",
"tensorflow.python.keras.datasets.mnist",
"tensorflow.python.keras.datasets.reuters",
"tensorflow.python.keras.estimator",
"tensorflow.python.keras.initializers",
"tensorflow.python.keras.losses",
"tensorflow.python.keras.metrics",
"tensorflow.python.keras.models",
"tensorflow.python.keras.ops",
"tensorflow.python.keras.optimizers",
"tensorflow.python.keras.premade.linear",
"tensorflow.python.keras.premade.wide_deep",
"tensorflow.python.keras.preprocessing.image",
"tensorflow.python.keras.preprocessing.sequence",
"tensorflow.python.keras.preprocessing.text",
"tensorflow.python.keras.regularizers",
"tensorflow.python.keras.saving.model_config",
"tensorflow.python.keras.saving.save",

View File

@ -32,6 +32,7 @@ from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.python.util import nest
_RANDOM_SEED = 1337
@ -619,7 +620,7 @@ class TestDistributionStrategyEmbeddingModelCorrectnessBase(
labels.append(label)
features.append(word_ids)
features = keras.preprocessing.sequence.pad_sequences(
features = sequence.pad_sequences(
features, maxlen=max_words)
x_train = np.asarray(features, dtype=np.float32)
y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1))

View File

@ -29,10 +29,6 @@ from tensorflow.python.keras.utils import all_utils as utils
keras_preprocessing.set_keras_submodules(backend=backend, utils=utils)
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.python.keras.preprocessing import text
del absolute_import
del division
del print_function

View File

@ -24,7 +24,7 @@ import tempfile
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras.preprocessing import image as preprocessing_image
from tensorflow.python.platform import test
try:
@ -41,11 +41,11 @@ def _generate_test_images():
bias = np.random.rand(img_w, img_h, 1) * 64
variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
imarray = np.random.rand(img_w, img_h, 3) * variance + bias
im = keras.preprocessing.image.array_to_img(imarray, scale=False)
im = preprocessing_image.array_to_img(imarray, scale=False)
rgb_images.append(im)
imarray = np.random.rand(img_w, img_h, 1) * variance + bias
im = keras.preprocessing.image.array_to_img(imarray, scale=False)
im = preprocessing_image.array_to_img(imarray, scale=False)
gray_images.append(im)
return [rgb_images, gray_images]
@ -60,10 +60,10 @@ class TestImage(test.TestCase):
for test_images in _generate_test_images():
img_list = []
for im in test_images:
img_list.append(keras.preprocessing.image.img_to_array(im)[None, ...])
img_list.append(preprocessing_image.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = keras.preprocessing.image.ImageDataGenerator(
generator = preprocessing_image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
@ -96,10 +96,10 @@ class TestImage(test.TestCase):
def test_image_data_generator_with_split_value_error(self):
with self.assertRaises(ValueError):
keras.preprocessing.image.ImageDataGenerator(validation_split=5)
preprocessing_image.ImageDataGenerator(validation_split=5)
def test_image_data_generator_invalid_data(self):
generator = keras.preprocessing.image.ImageDataGenerator(
generator = preprocessing_image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
@ -119,14 +119,14 @@ class TestImage(test.TestCase):
generator.flow(x)
with self.assertRaises(ValueError):
generator = keras.preprocessing.image.ImageDataGenerator(
generator = preprocessing_image.ImageDataGenerator(
data_format='unknown')
generator = keras.preprocessing.image.ImageDataGenerator(
generator = preprocessing_image.ImageDataGenerator(
zoom_range=(2, 2))
def test_image_data_generator_fit(self):
generator = keras.preprocessing.image.ImageDataGenerator(
generator = preprocessing_image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
@ -139,7 +139,7 @@ class TestImage(test.TestCase):
# Test RGB
x = np.random.random((32, 10, 10, 3))
generator.fit(x)
generator = keras.preprocessing.image.ImageDataGenerator(
generator = preprocessing_image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
@ -192,14 +192,14 @@ class TestImage(test.TestCase):
# Test image loading util
fname = os.path.join(temp_dir, filenames[0])
_ = keras.preprocessing.image.load_img(fname)
_ = keras.preprocessing.image.load_img(fname, grayscale=True)
_ = keras.preprocessing.image.load_img(fname, target_size=(10, 10))
_ = keras.preprocessing.image.load_img(fname, target_size=(10, 10),
interpolation='bilinear')
_ = preprocessing_image.load_img(fname)
_ = preprocessing_image.load_img(fname, grayscale=True)
_ = preprocessing_image.load_img(fname, target_size=(10, 10))
_ = preprocessing_image.load_img(fname, target_size=(10, 10),
interpolation='bilinear')
# create iterator
generator = keras.preprocessing.image.ImageDataGenerator()
generator = preprocessing_image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(temp_dir)
# check number of classes and images
@ -223,7 +223,7 @@ class TestImage(test.TestCase):
return np.zeros_like(x)
# Test usage as Sequence
generator = keras.preprocessing.image.ImageDataGenerator(
generator = preprocessing_image.ImageDataGenerator(
preprocessing_function=preprocessing_function)
dir_seq = generator.flow_from_directory(
str(temp_dir),
@ -276,7 +276,7 @@ class TestImage(test.TestCase):
count += 1
# create iterator
generator = keras.preprocessing.image.ImageDataGenerator(
generator = preprocessing_image.ImageDataGenerator(
validation_split=validation_split)
with self.assertRaises(ValueError):
@ -317,32 +317,32 @@ class TestImage(test.TestCase):
# Test channels_first data format
x = np.random.random((3, height, width))
img = keras.preprocessing.image.array_to_img(
img = preprocessing_image.array_to_img(
x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = keras.preprocessing.image.img_to_array(
x = preprocessing_image.img_to_array(
img, data_format='channels_first')
self.assertEqual(x.shape, (3, height, width))
# Test 2D
x = np.random.random((1, height, width))
img = keras.preprocessing.image.array_to_img(
img = preprocessing_image.array_to_img(
x, data_format='channels_first')
self.assertEqual(img.size, (width, height))
x = keras.preprocessing.image.img_to_array(
x = preprocessing_image.img_to_array(
img, data_format='channels_first')
self.assertEqual(x.shape, (1, height, width))
# Test channels_last data format
x = np.random.random((height, width, 3))
img = keras.preprocessing.image.array_to_img(x, data_format='channels_last')
img = preprocessing_image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = keras.preprocessing.image.img_to_array(img, data_format='channels_last')
x = preprocessing_image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 3))
# Test 2D
x = np.random.random((height, width, 1))
img = keras.preprocessing.image.array_to_img(x, data_format='channels_last')
img = preprocessing_image.array_to_img(x, data_format='channels_last')
self.assertEqual(img.size, (width, height))
x = keras.preprocessing.image.img_to_array(img, data_format='channels_last')
x = preprocessing_image.img_to_array(img, data_format='channels_last')
self.assertEqual(x.shape, (height, width, 1))
def test_batch_standardize(self):
@ -353,10 +353,10 @@ class TestImage(test.TestCase):
for test_images in _generate_test_images():
img_list = []
for im in test_images:
img_list.append(keras.preprocessing.image.img_to_array(im)[None, ...])
img_list.append(preprocessing_image.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = keras.preprocessing.image.ImageDataGenerator(
generator = preprocessing_image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
@ -382,15 +382,15 @@ class TestImage(test.TestCase):
def test_img_transforms(self):
x = np.random.random((3, 200, 200))
_ = keras.preprocessing.image.random_rotation(x, 20)
_ = keras.preprocessing.image.random_shift(x, 0.2, 0.2)
_ = keras.preprocessing.image.random_shear(x, 2.)
_ = keras.preprocessing.image.random_zoom(x, (0.5, 0.5))
_ = keras.preprocessing.image.apply_channel_shift(x, 2, 2)
_ = keras.preprocessing.image.apply_affine_transform(x, 2)
_ = preprocessing_image.random_rotation(x, 20)
_ = preprocessing_image.random_shift(x, 0.2, 0.2)
_ = preprocessing_image.random_shear(x, 2.)
_ = preprocessing_image.random_zoom(x, (0.5, 0.5))
_ = preprocessing_image.apply_channel_shift(x, 2, 2)
_ = preprocessing_image.apply_affine_transform(x, 2)
with self.assertRaises(ValueError):
keras.preprocessing.image.random_zoom(x, (0, 0, 0))
_ = keras.preprocessing.image.random_channel_shift(x, 2.)
preprocessing_image.random_zoom(x, (0, 0, 0))
_ = preprocessing_image.random_channel_shift(x, 2.)
if __name__ == '__main__':

View File

@ -22,7 +22,7 @@ from math import ceil
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras.preprocessing import sequence as preprocessing_sequence
from tensorflow.python.platform import test
@ -32,65 +32,65 @@ class TestSequence(test.TestCase):
a = [[1], [1, 2], [1, 2, 3]]
# test padding
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='pre')
b = preprocessing_sequence.pad_sequences(a, maxlen=3, padding='pre')
self.assertAllClose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='post')
b = preprocessing_sequence.pad_sequences(a, maxlen=3, padding='post')
self.assertAllClose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])
# test truncating
b = keras.preprocessing.sequence.pad_sequences(
b = preprocessing_sequence.pad_sequences(
a, maxlen=2, truncating='pre')
self.assertAllClose(b, [[0, 1], [1, 2], [2, 3]])
b = keras.preprocessing.sequence.pad_sequences(
b = preprocessing_sequence.pad_sequences(
a, maxlen=2, truncating='post')
self.assertAllClose(b, [[0, 1], [1, 2], [1, 2]])
# test value
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, value=1)
b = preprocessing_sequence.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])
def test_pad_sequences_vector(self):
a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]
# test padding
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='pre')
b = preprocessing_sequence.pad_sequences(a, maxlen=3, padding='pre')
self.assertAllClose(b, [[[0, 0], [0, 0], [1, 1]], [[0, 0], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]]])
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='post')
b = preprocessing_sequence.pad_sequences(a, maxlen=3, padding='post')
self.assertAllClose(b, [[[1, 1], [0, 0], [0, 0]], [[2, 1], [2, 2], [0, 0]],
[[3, 1], [3, 2], [3, 3]]])
# test truncating
b = keras.preprocessing.sequence.pad_sequences(
b = preprocessing_sequence.pad_sequences(
a, maxlen=2, truncating='pre')
self.assertAllClose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 2], [3,
3]]])
b = keras.preprocessing.sequence.pad_sequences(
b = preprocessing_sequence.pad_sequences(
a, maxlen=2, truncating='post')
self.assertAllClose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 1], [3,
2]]])
# test value
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, value=1)
b = preprocessing_sequence.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(b, [[[1, 1], [1, 1], [1, 1]], [[1, 1], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]]])
def test_make_sampling_table(self):
a = keras.preprocessing.sequence.make_sampling_table(3)
a = preprocessing_sequence.make_sampling_table(3)
self.assertAllClose(
a, np.asarray([0.00315225, 0.00315225, 0.00547597]), rtol=.1)
def test_skipgrams(self):
# test with no window size and binary labels
couples, labels = keras.preprocessing.sequence.skipgrams(
couples, labels = preprocessing_sequence.skipgrams(
np.arange(3), vocabulary_size=3)
for couple in couples:
self.assertIn(couple[0], [0, 1, 2])
self.assertIn(couple[1], [0, 1, 2])
# test window size and categorical labels
couples, labels = keras.preprocessing.sequence.skipgrams(
couples, labels = preprocessing_sequence.skipgrams(
np.arange(5), vocabulary_size=5, window_size=1, categorical=True)
for couple in couples:
self.assertLessEqual(couple[0] - couple[1], 3)
@ -100,7 +100,7 @@ class TestSequence(test.TestCase):
def test_remove_long_seq(self):
a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]
new_seq, new_label = keras.preprocessing.sequence._remove_long_seq(
new_seq, new_label = preprocessing_sequence._remove_long_seq(
maxlen=3, seq=a, label=['a', 'b', ['c', 'd']])
self.assertEqual(new_seq, [[[1, 1]], [[2, 1], [2, 2]]])
self.assertEqual(new_label, ['a', 'b'])
@ -109,7 +109,7 @@ class TestSequence(test.TestCase):
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data_gen = preprocessing_sequence.TimeseriesGenerator(
data, targets, length=10, sampling_rate=2, batch_size=2)
self.assertEqual(len(data_gen), 20)
self.assertAllClose(data_gen[0][0],
@ -121,7 +121,7 @@ class TestSequence(test.TestCase):
[9], [11]]]))
self.assertAllClose(data_gen[1][1], np.array([[12], [13]]))
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data_gen = preprocessing_sequence.TimeseriesGenerator(
data, targets, length=10, sampling_rate=2, reverse=True, batch_size=2)
self.assertEqual(len(data_gen), 20)
self.assertAllClose(data_gen[0][0],
@ -129,7 +129,7 @@ class TestSequence(test.TestCase):
[3], [1]]]))
self.assertAllClose(data_gen[0][1], np.array([[10], [11]]))
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data_gen = preprocessing_sequence.TimeseriesGenerator(
data, targets, length=10, sampling_rate=2, shuffle=True, batch_size=1)
batch = data_gen[0]
r = batch[1][0][0]
@ -140,7 +140,7 @@ class TestSequence(test.TestCase):
[r],
]))
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data_gen = preprocessing_sequence.TimeseriesGenerator(
data, targets, length=10, sampling_rate=2, stride=2, batch_size=2)
self.assertEqual(len(data_gen), 10)
self.assertAllClose(data_gen[1][0],
@ -148,7 +148,7 @@ class TestSequence(test.TestCase):
[12], [14]]]))
self.assertAllClose(data_gen[1][1], np.array([[14], [16]]))
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data_gen = preprocessing_sequence.TimeseriesGenerator(
data,
targets,
length=10,
@ -164,7 +164,7 @@ class TestSequence(test.TestCase):
data = np.array([np.random.random_sample((1, 2, 3, 4)) for i in range(50)])
targets = np.array([np.random.random_sample((3, 2, 1)) for i in range(50)])
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data_gen = preprocessing_sequence.TimeseriesGenerator(
data,
targets,
length=10,
@ -181,7 +181,7 @@ class TestSequence(test.TestCase):
self.assertAllClose(data_gen[0][1], np.array([targets[20], targets[21]]))
with self.assertRaises(ValueError) as context:
keras.preprocessing.sequence.TimeseriesGenerator(data, targets, length=50)
preprocessing_sequence.TimeseriesGenerator(data, targets, length=50)
error = str(context.exception)
self.assertIn('`start_index+length=50 > end_index=49` is disallowed', error)
@ -189,7 +189,7 @@ class TestSequence(test.TestCase):
x = np.array([[i] for i in range(10)])
for length in range(3, 10):
g = keras.preprocessing.sequence.TimeseriesGenerator(
g = preprocessing_sequence.TimeseriesGenerator(
x, x, length=length, batch_size=1)
expected = max(0, len(x) - length)
actual = len(g)
@ -211,7 +211,7 @@ class TestSequence(test.TestCase):
for stride, length, batch_size, shuffle in zip(strides, lengths,
batch_sizes, shuffles):
g = keras.preprocessing.sequence.TimeseriesGenerator(
g = preprocessing_sequence.TimeseriesGenerator(
x,
x,
length=length,

View File

@ -21,7 +21,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras.preprocessing import text as preprocessing_text
from tensorflow.python.platform import test
@ -29,14 +29,14 @@ class TestText(test.TestCase):
def test_one_hot(self):
text = 'The cat sat on the mat.'
encoded = keras.preprocessing.text.one_hot(text, 5)
encoded = preprocessing_text.one_hot(text, 5)
self.assertEqual(len(encoded), 6)
self.assertLessEqual(np.max(encoded), 4)
self.assertGreaterEqual(np.min(encoded), 0)
# Test on unicode.
text = u'The cat sat on the mat.'
encoded = keras.preprocessing.text.one_hot(text, 5)
encoded = preprocessing_text.one_hot(text, 5)
self.assertEqual(len(encoded), 6)
self.assertLessEqual(np.max(encoded), 4)
self.assertGreaterEqual(np.min(encoded), 0)
@ -47,7 +47,7 @@ class TestText(test.TestCase):
'The dog sat on the log.',
'Dogs and cats living together.'
]
tokenizer = keras.preprocessing.text.Tokenizer(num_words=10)
tokenizer = preprocessing_text.Tokenizer(num_words=10)
tokenizer.fit_on_texts(texts)
sequences = []
@ -64,14 +64,14 @@ class TestText(test.TestCase):
def test_hashing_trick_hash(self):
text = 'The cat sat on the mat.'
encoded = keras.preprocessing.text.hashing_trick(text, 5)
encoded = preprocessing_text.hashing_trick(text, 5)
self.assertEqual(len(encoded), 6)
self.assertLessEqual(np.max(encoded), 4)
self.assertGreaterEqual(np.min(encoded), 1)
def test_hashing_trick_md5(self):
text = 'The cat sat on the mat.'
encoded = keras.preprocessing.text.hashing_trick(
encoded = preprocessing_text.hashing_trick(
text, 5, hash_function='md5')
self.assertEqual(len(encoded), 6)
self.assertLessEqual(np.max(encoded), 4)
@ -82,13 +82,13 @@ class TestText(test.TestCase):
x_test = ['This text has some unknown words'] # 2 OOVs: some, unknown
# Default, without OOV flag
tokenizer = keras.preprocessing.text.Tokenizer()
tokenizer = preprocessing_text.Tokenizer()
tokenizer.fit_on_texts(x_train)
x_test_seq = tokenizer.texts_to_sequences(x_test)
self.assertEqual(len(x_test_seq[0]), 4) # discards 2 OOVs
# With OOV feature
tokenizer = keras.preprocessing.text.Tokenizer(oov_token='<unk>')
tokenizer = preprocessing_text.Tokenizer(oov_token='<unk>')
tokenizer.fit_on_texts(x_train)
x_test_seq = tokenizer.texts_to_sequences(x_test)
self.assertEqual(len(x_test_seq[0]), 6) # OOVs marked in place
@ -100,7 +100,7 @@ class TestText(test.TestCase):
]
word_sequences = [['The', 'cat', 'is', 'sitting'],
['The', 'dog', 'is', 'standing']]
tokenizer = keras.preprocessing.text.Tokenizer()
tokenizer = preprocessing_text.Tokenizer()
tokenizer.fit_on_texts(texts)
tokenizer.fit_on_texts(word_sequences)
@ -111,29 +111,29 @@ class TestText(test.TestCase):
def test_text_to_word_sequence(self):
text = 'hello! ? world!'
seq = keras.preprocessing.text.text_to_word_sequence(text)
seq = preprocessing_text.text_to_word_sequence(text)
self.assertEqual(seq, ['hello', 'world'])
def test_text_to_word_sequence_multichar_split(self):
text = 'hello!stop?world!'
seq = keras.preprocessing.text.text_to_word_sequence(text, split='stop')
seq = preprocessing_text.text_to_word_sequence(text, split='stop')
self.assertEqual(seq, ['hello', 'world'])
def test_text_to_word_sequence_unicode(self):
text = u'ali! veli? kırk dokuz elli'
seq = keras.preprocessing.text.text_to_word_sequence(text)
seq = preprocessing_text.text_to_word_sequence(text)
self.assertEqual(seq, [u'ali', u'veli', u'kırk', u'dokuz', u'elli'])
def test_text_to_word_sequence_unicode_multichar_split(self):
text = u'ali!stopveli?stopkırkstopdokuzstopelli'
seq = keras.preprocessing.text.text_to_word_sequence(text, split='stop')
seq = preprocessing_text.text_to_word_sequence(text, split='stop')
self.assertEqual(seq, [u'ali', u'veli', u'kırk', u'dokuz', u'elli'])
def test_tokenizer_unicode(self):
texts = [
u'ali veli kırk dokuz elli', u'ali veli kırk dokuz elli veli kırk dokuz'
]
tokenizer = keras.preprocessing.text.Tokenizer(num_words=5)
tokenizer = preprocessing_text.Tokenizer(num_words=5)
tokenizer.fit_on_texts(texts)
self.assertEqual(len(tokenizer.word_counts), 5)

View File

@ -44,7 +44,6 @@ _TENSORFLOW_DOC_SOURCES = {
'gfile': DocSource(docstring_module_name='platform.gfile'),
'graph_util': DocSource(docstring_module_name='framework.graph_util'),
'image': DocSource(docstring_module_name='ops.image_ops'),
'keras.estimator': DocSource(docstring_module_name='keras.estimator'),
'linalg': DocSource(docstring_module_name='ops.linalg_ops'),
'logging': DocSource(docstring_module_name='ops.logging_ops'),
'losses': DocSource(docstring_module_name='ops.losses.losses'),