Remove the __init__.py content for keras/utils.
1. Change all imports that use keras.utils to explicit imports of the individual modules.
2. Remove the deprecated util imports in keras_preprocessing.
3. Move all public symbols from __init__.py to all_utils.py, which is used by keras/applications for injection.

PiperOrigin-RevId: 273327600
Parent: 785de35b28
Commit: cab28a0bb1
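For callers, the practical effect is that symbols previously re-exported from keras/utils/__init__.py are now imported from the submodule that owns them. A minimal sketch of the migration pattern, based on the to_categorical call that recurs throughout this diff (the numpy setup is illustrative only):

    import numpy as np

    # Before this change: rely on the package-level re-export.
    # from tensorflow.python import keras
    # y = keras.utils.to_categorical(y, num_classes=20)

    # After this change: import the owning submodule explicitly.
    from tensorflow.python.keras.utils import np_utils

    y = np.random.randint(20, size=(10, 1))
    y = np_utils.to_categorical(y, num_classes=20)

The new utils/all_utils.py takes over the old __init__.py's role of re-exporting the public symbols in one place; keras/applications now injects that module instead of the utils package.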
@ -27,7 +27,6 @@ from tensorflow.core.example import example_pb2
|
||||
from tensorflow.core.example import feature_pb2
|
||||
from tensorflow.core.protobuf import config_pb2
|
||||
from tensorflow.core.protobuf import rewriter_config_pb2
|
||||
from tensorflow.python import keras
|
||||
from tensorflow.python.client import session
|
||||
from tensorflow.python.eager import backprop
|
||||
from tensorflow.python.eager import context
|
||||
@ -41,6 +40,7 @@ from tensorflow.python.framework import errors
|
||||
from tensorflow.python.framework import ops
|
||||
from tensorflow.python.framework import sparse_tensor
|
||||
from tensorflow.python.framework import test_util
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import init_ops
|
||||
from tensorflow.python.ops import lookup_ops
|
||||
@ -2085,7 +2085,7 @@ class LinearModelTest(test.TestCase):
|
||||
|
||||
x = {'a': np.random.random((10, 1))}
|
||||
y = np.random.randint(20, size=(10, 1))
|
||||
y = keras.utils.to_categorical(y, num_classes=20)
|
||||
y = np_utils.to_categorical(y, num_classes=20)
|
||||
model.fit(x, y, epochs=1, batch_size=5)
|
||||
model.fit(x, y, epochs=1, batch_size=5)
|
||||
model.evaluate(x, y, batch_size=5)
|
||||
|
@ -30,7 +30,7 @@ from tensorflow.python.feature_column import utils as fc_utils
|
||||
from tensorflow.python.framework import dtypes
|
||||
from tensorflow.python.framework import ops
|
||||
from tensorflow.python.framework import tensor_shape
|
||||
from tensorflow.python.keras import utils
|
||||
from tensorflow.python.keras.utils import generic_utils
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import check_ops
|
||||
from tensorflow.python.ops import parsing_ops
|
||||
@ -585,7 +585,8 @@ class SequenceNumericColumn(
|
||||
def get_config(self):
|
||||
"""See 'FeatureColumn` base class."""
|
||||
config = dict(zip(self._fields, self))
|
||||
config['normalizer_fn'] = utils.serialize_keras_object(self.normalizer_fn)
|
||||
config['normalizer_fn'] = generic_utils.serialize_keras_object(
|
||||
self.normalizer_fn)
|
||||
config['dtype'] = self.dtype.name
|
||||
return config
|
||||
|
||||
@ -594,7 +595,7 @@ class SequenceNumericColumn(
|
||||
"""See 'FeatureColumn` base class."""
|
||||
fc._check_config_keys(config, cls._fields)
|
||||
kwargs = fc._standardize_and_copy_config(config)
|
||||
kwargs['normalizer_fn'] = utils.deserialize_keras_object(
|
||||
kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
|
||||
config['normalizer_fn'], custom_objects=custom_objects)
|
||||
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
|
||||
return cls(**kwargs)
|
||||
|
@ -28,7 +28,7 @@ from tensorflow.python.util.lazy_loader import LazyLoader
|
||||
# Prevent circular dependencies with Keras serialization.
|
||||
generic_utils = LazyLoader(
|
||||
'generic_utils', globals(),
|
||||
'tensorflow.python.keras.utils')
|
||||
'tensorflow.python.keras.utils.generic_utils')
|
||||
|
||||
_FEATURE_COLUMNS = [
|
||||
fc_lib.BucketizedColumn, fc_lib.CrossedColumn, fc_lib.EmbeddingColumn,
|
||||
|
@ -51,6 +51,7 @@ py_library(
|
||||
"preprocessing/text.py",
|
||||
"testing_utils.py",
|
||||
"utils/__init__.py",
|
||||
"utils/all_utils.py",
|
||||
"utils/multi_gpu_utils.py",
|
||||
"utils/np_utils.py",
|
||||
"utils/vis_utils.py",
|
||||
|
@ -41,7 +41,6 @@ from tensorflow.python.keras import optimizers
|
||||
from tensorflow.python.keras import premade
|
||||
from tensorflow.python.keras import preprocessing
|
||||
from tensorflow.python.keras import regularizers
|
||||
from tensorflow.python.keras import utils
|
||||
from tensorflow.python.keras.layers import Input
|
||||
from tensorflow.python.keras.models import Model
|
||||
from tensorflow.python.keras.models import Sequential
|
||||
|
@ -13,6 +13,13 @@ package(
|
||||
keras_packages = [
|
||||
"tensorflow.python",
|
||||
"tensorflow.python.keras",
|
||||
"tensorflow.python.keras.utils.data_utils",
|
||||
"tensorflow.python.keras.utils.generic_utils",
|
||||
"tensorflow.python.keras.utils.io_utils",
|
||||
"tensorflow.python.keras.utils.layer_utils",
|
||||
"tensorflow.python.keras.utils.multi_gpu_utils",
|
||||
"tensorflow.python.keras.utils.np_utils",
|
||||
"tensorflow.python.keras.utils.vis_utils",
|
||||
"tensorflow.python.keras.wrappers.scikit_learn",
|
||||
]
|
||||
|
||||
|
@@ -25,7 +25,7 @@ from tensorflow.python.keras import backend
 from tensorflow.python.keras import engine
 from tensorflow.python.keras import layers
 from tensorflow.python.keras import models
-from tensorflow.python.keras import utils
+from tensorflow.python.keras.utils import all_utils
 from tensorflow.python.util import tf_inspect


@@ -45,7 +45,7 @@ def keras_modules_injection(base_fun):
     if 'layers' not in kwargs:
       kwargs['layers'] = layers
     kwargs['models'] = models
-    kwargs['utils'] = utils
+    kwargs['utils'] = all_utils
     return base_fun(*args, **kwargs)
   return wrapper

@ -41,6 +41,8 @@ from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.engine import base_layer
|
||||
from tensorflow.python.keras.engine import sequential
|
||||
from tensorflow.python.keras.optimizer_v2 import gradient_descent
|
||||
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import math_ops
|
||||
from tensorflow.python.ops import summary_ops_v2
|
||||
@ -49,7 +51,6 @@ from tensorflow.python.platform import tf_logging as logging
|
||||
from tensorflow.python.summary import summary_iterator
|
||||
from tensorflow.python.training import adam
|
||||
from tensorflow.python.training import checkpoint_management
|
||||
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
|
||||
|
||||
try:
|
||||
import h5py # pylint:disable=g-import-not-at-top
|
||||
@ -371,8 +372,8 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
# case 1
|
||||
monitor = 'val_loss'
|
||||
save_best_only = False
|
||||
@ -803,8 +804,8 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
model = testing_utils.get_small_sequential_mlp(
|
||||
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
|
||||
model.compile(
|
||||
@ -932,8 +933,8 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
model = testing_utils.get_small_sequential_mlp(
|
||||
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
|
||||
model.compile(
|
||||
@ -1003,8 +1004,8 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
def make_model():
|
||||
random_seed.set_random_seed(1234)
|
||||
@ -1110,8 +1111,8 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
def make_model():
|
||||
np.random.seed(1337)
|
||||
@ -1189,8 +1190,8 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
|
||||
model = keras.models.Sequential()
|
||||
for _ in range(5):
|
||||
@ -1241,8 +1242,8 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
cbks = [keras.callbacks.TerminateOnNaN()]
|
||||
model = keras.models.Sequential()
|
||||
initializer = keras.initializers.Constant(value=1e5)
|
||||
@ -1278,8 +1279,8 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
model = keras.models.Sequential()
|
||||
model.add(
|
||||
keras.layers.Dense(
|
||||
|
@ -29,6 +29,7 @@ from tensorflow.python import keras
|
||||
from tensorflow.python.framework import test_util
|
||||
from tensorflow.python.keras import callbacks_v1
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.platform import test
|
||||
from tensorflow.python.training import adam
|
||||
|
||||
@ -55,8 +56,8 @@ class TestTensorBoardV1(test.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
def data_generator(train):
|
||||
if train:
|
||||
@ -164,8 +165,8 @@ class TestTensorBoardV1(test.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
def data_generator(train):
|
||||
if train:
|
||||
@ -268,8 +269,8 @@ class TestTensorBoardV1(test.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
with self.cached_session():
|
||||
model = keras.models.Sequential()
|
||||
@ -364,8 +365,8 @@ class TestTensorBoardV1(test.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
model = testing_utils.get_small_sequential_mlp(
|
||||
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
|
||||
@ -475,8 +476,8 @@ class TestTensorBoardV1(test.TestCase):
|
||||
test_samples=TEST_SAMPLES,
|
||||
input_shape=(INPUT_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_test = np_utils.to_categorical(y_test)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
model = testing_utils.get_small_sequential_mlp(
|
||||
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
|
||||
|
@ -37,6 +37,7 @@ from tensorflow.python.keras.distribute import distributed_training_utils
|
||||
from tensorflow.python.keras.engine import base_layer_utils
|
||||
from tensorflow.python.keras.mixed_precision.experimental import policy
|
||||
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import check_ops
|
||||
from tensorflow.python.ops import math_ops
|
||||
@ -112,10 +113,10 @@ def get_multi_inputs_multi_outputs_data():
|
||||
num_classes=2,
|
||||
random_seed=_RANDOM_SEED)
|
||||
|
||||
c_train = keras.utils.to_categorical(c_train)
|
||||
c_test = keras.utils.to_categorical(c_test)
|
||||
d_train = keras.utils.to_categorical(d_train)
|
||||
d_test = keras.utils.to_categorical(d_test)
|
||||
c_train = np_utils.to_categorical(c_train)
|
||||
c_test = np_utils.to_categorical(c_test)
|
||||
d_train = np_utils.to_categorical(d_train)
|
||||
d_test = np_utils.to_categorical(d_test)
|
||||
|
||||
train_data = {
|
||||
'input_a': a_train,
|
||||
|
@ -26,6 +26,7 @@ from tensorflow.python.feature_column import feature_column_lib as fc
|
||||
from tensorflow.python.keras import keras_parameterized
|
||||
from tensorflow.python.keras import metrics as metrics_module
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.platform import test
|
||||
|
||||
|
||||
@ -64,7 +65,7 @@ class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
|
||||
|
||||
x = {'a': np.random.random((10, 1))}
|
||||
y = np.random.randint(20, size=(10, 1))
|
||||
y = keras.utils.to_categorical(y, num_classes=20)
|
||||
y = np_utils.to_categorical(y, num_classes=20)
|
||||
model.fit(x, y, epochs=1, batch_size=5)
|
||||
model.fit(x, y, epochs=1, batch_size=5)
|
||||
model.evaluate(x, y, batch_size=5)
|
||||
@ -86,7 +87,7 @@ class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
|
||||
experimental_run_tf_function=testing_utils.should_run_tf_function())
|
||||
|
||||
y = np.random.randint(20, size=(100, 1))
|
||||
y = keras.utils.to_categorical(y, num_classes=20)
|
||||
y = np_utils.to_categorical(y, num_classes=20)
|
||||
x = {'a': np.random.random((100, 1))}
|
||||
ds1 = dataset_ops.Dataset.from_tensor_slices(x)
|
||||
ds2 = dataset_ops.Dataset.from_tensor_slices(y)
|
||||
@ -151,7 +152,7 @@ class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
|
||||
|
||||
x = {'a': np.random.random((10, 1)), 'b': np.random.random((10, 1))}
|
||||
y = np.random.randint(20, size=(10, 1))
|
||||
y = keras.utils.to_categorical(y, num_classes=20)
|
||||
y = np_utils.to_categorical(y, num_classes=20)
|
||||
dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
|
||||
dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
|
||||
dnn_model.evaluate(x=x, y=y, batch_size=5)
|
||||
@ -172,7 +173,7 @@ class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
|
||||
experimental_run_tf_function=testing_utils.should_run_tf_function())
|
||||
|
||||
y = np.random.randint(20, size=(100, 1))
|
||||
y = keras.utils.to_categorical(y, num_classes=20)
|
||||
y = np_utils.to_categorical(y, num_classes=20)
|
||||
x = {'a': np.random.random((100, 1)), 'b': np.random.random((100, 1))}
|
||||
ds1 = dataset_ops.Dataset.from_tensor_slices(x)
|
||||
ds2 = dataset_ops.Dataset.from_tensor_slices(y)
|
||||
|
@ -416,7 +416,7 @@ class TestGeneratorMethodsWithSequences(keras_parameterized.TestCase):
|
||||
@data_utils.dont_use_multiprocessing_pool
|
||||
def test_training_with_sequences(self):
|
||||
|
||||
class DummySequence(keras.utils.Sequence):
|
||||
class DummySequence(data_utils.Sequence):
|
||||
|
||||
def __getitem__(self, idx):
|
||||
return np.zeros([10, 2]), np.ones([10, 4])
|
||||
@ -449,7 +449,7 @@ class TestGeneratorMethodsWithSequences(keras_parameterized.TestCase):
|
||||
def test_sequence_input_to_fit_eval_predict(self):
|
||||
val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
|
||||
|
||||
class CustomSequence(keras.utils.Sequence):
|
||||
class CustomSequence(data_utils.Sequence):
|
||||
|
||||
def __getitem__(self, idx):
|
||||
return np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
|
||||
@ -457,7 +457,7 @@ class TestGeneratorMethodsWithSequences(keras_parameterized.TestCase):
|
||||
def __len__(self):
|
||||
return 2
|
||||
|
||||
class CustomSequenceChangingBatchSize(keras.utils.Sequence):
|
||||
class CustomSequenceChangingBatchSize(data_utils.Sequence):
|
||||
|
||||
def __getitem__(self, idx):
|
||||
batch_size = 10 - idx
|
||||
|
@ -39,9 +39,11 @@ from tensorflow.python.keras import keras_parameterized
|
||||
from tensorflow.python.keras import losses
|
||||
from tensorflow.python.keras import metrics as metrics_module
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.engine import training_utils
|
||||
from tensorflow.python.keras.callbacks import Callback
|
||||
from tensorflow.python.keras.engine import training_utils
|
||||
from tensorflow.python.keras.optimizer_v2 import gradient_descent
|
||||
from tensorflow.python.keras.utils import data_utils
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import math_ops
|
||||
from tensorflow.python.ops import sparse_ops
|
||||
@ -1204,8 +1206,8 @@ class TrainingTest(keras_parameterized.TestCase):
|
||||
with context.eager_mode():
|
||||
np.random.seed(1337)
|
||||
train_x = np.ones((100, 4))
|
||||
train_y = keras.utils.to_categorical(np.random.randint(0, 1,
|
||||
size=(100, 1)), 2)
|
||||
train_y = np_utils.to_categorical(
|
||||
np.random.randint(0, 1, size=(100, 1)), 2)
|
||||
|
||||
reference_model = testing_utils.get_small_sequential_mlp(16, 2,
|
||||
input_dim=4)
|
||||
@ -1599,7 +1601,7 @@ class TestExceptionsAndWarnings(keras_parameterized.TestCase):
|
||||
@keras_parameterized.run_all_keras_modes
|
||||
def test_invalid_batch_size_argument_with_sequence_input(self):
|
||||
|
||||
class DummySequence(keras.utils.Sequence):
|
||||
class DummySequence(data_utils.Sequence):
|
||||
|
||||
def __getitem__(self, idx):
|
||||
return np.zeros([10, 2]), np.ones([10, 4])
|
||||
@ -1673,8 +1675,8 @@ class LossWeightingTest(keras_parameterized.TestCase):
|
||||
int_y_test = y_test.copy()
|
||||
int_y_train = y_train.copy()
|
||||
# convert class vectors to binary class matrices
|
||||
y_train = keras.utils.to_categorical(y_train, num_classes)
|
||||
y_test = keras.utils.to_categorical(y_test, num_classes)
|
||||
y_train = np_utils.to_categorical(y_train, num_classes)
|
||||
y_test = np_utils.to_categorical(y_test, num_classes)
|
||||
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
|
||||
|
||||
class_weight = dict([(i, 1.) for i in range(num_classes)])
|
||||
@ -1742,8 +1744,8 @@ class LossWeightingTest(keras_parameterized.TestCase):
|
||||
int_y_test = y_test.copy()
|
||||
int_y_train = y_train.copy()
|
||||
# convert class vectors to binary class matrices
|
||||
y_train = keras.utils.to_categorical(y_train, num_classes)
|
||||
y_test = keras.utils.to_categorical(y_test, num_classes)
|
||||
y_train = np_utils.to_categorical(y_train, num_classes)
|
||||
y_test = np_utils.to_categorical(y_test, num_classes)
|
||||
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
|
||||
|
||||
sample_weight = np.ones((y_train.shape[0]))
|
||||
@ -1812,8 +1814,8 @@ class LossWeightingTest(keras_parameterized.TestCase):
|
||||
int_y_test = y_test.copy()
|
||||
int_y_train = y_train.copy()
|
||||
# convert class vectors to binary class matrices
|
||||
y_train = keras.utils.to_categorical(y_train, num_classes)
|
||||
y_test = keras.utils.to_categorical(y_test, num_classes)
|
||||
y_train = np_utils.to_categorical(y_train, num_classes)
|
||||
y_test = np_utils.to_categorical(y_test, num_classes)
|
||||
test_ids = np.where(int_y_test == np.array(weighted_class))[0]
|
||||
|
||||
sample_weight = np.ones((y_train.shape[0]))
|
||||
@ -1938,7 +1940,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
|
||||
input_shape=(input_dim,),
|
||||
num_classes=num_classes)
|
||||
# convert class vectors to binary class matrices
|
||||
y_train = keras.utils.to_categorical(y_train, num_classes)
|
||||
y_train = np_utils.to_categorical(y_train, num_classes)
|
||||
class_weight = dict([(i, 1.) for i in range(num_classes)])
|
||||
|
||||
del class_weight[1]
|
||||
|
@ -28,6 +28,7 @@ from tensorflow.python.eager import context
|
||||
from tensorflow.python.framework import dtypes
|
||||
from tensorflow.python.keras import keras_parameterized
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.ops import nn_ops as nn
|
||||
from tensorflow.python.ops import rnn_cell
|
||||
from tensorflow.python.platform import test
|
||||
@ -62,7 +63,7 @@ class VectorClassificationIntegrationTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(10,),
|
||||
num_classes=2)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
model = testing_utils.get_model_from_layers(
|
||||
[keras.layers.Dense(16, activation='relu'),
|
||||
@ -93,7 +94,7 @@ class VectorClassificationIntegrationTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(10,),
|
||||
num_classes=2)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
base_model = testing_utils.get_model_from_layers(
|
||||
[keras.layers.Dense(16,
|
||||
@ -141,7 +142,7 @@ class SequentialIntegrationTest(KerasIntegrationTest):
|
||||
test_samples=0,
|
||||
input_shape=(10,),
|
||||
num_classes=2)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
model = keras.Sequential([
|
||||
keras.layers.Dense(16, activation='relu'),
|
||||
keras.layers.Dropout(0.1),
|
||||
@ -199,7 +200,7 @@ class TimeseriesClassificationIntegrationTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(4, 10),
|
||||
num_classes=2)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
layers = [
|
||||
keras.layers.LSTM(5, return_sequences=True),
|
||||
@ -229,7 +230,7 @@ class TimeseriesClassificationIntegrationTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(4, 10),
|
||||
num_classes=2)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
model = keras.models.Sequential()
|
||||
model.add(keras.layers.RNN(rnn_cell.LSTMCell(5), return_sequences=True,
|
||||
@ -264,7 +265,7 @@ class ImageClassificationIntegrationTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(10, 10, 3),
|
||||
num_classes=2)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
layers = [
|
||||
keras.layers.Conv2D(4, 3, padding='same', activation='relu'),
|
||||
@ -308,7 +309,7 @@ class ActivationV2IntegrationTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(10,),
|
||||
num_classes=2)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
|
||||
model = keras.Sequential([
|
||||
keras.layers.Flatten(input_shape=x_train.shape[1:]),
|
||||
|
@ -26,6 +26,7 @@ from tensorflow.python.eager import context
|
||||
from tensorflow.python.framework import test_util as tf_test_util
|
||||
from tensorflow.python.keras import keras_parameterized
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.platform import test
|
||||
|
||||
|
||||
@ -108,7 +109,7 @@ class GRULayerTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(timesteps, embedding_dim),
|
||||
num_classes=units)
|
||||
y_train = keras.utils.to_categorical(y_train, units)
|
||||
y_train = np_utils.to_categorical(y_train, units)
|
||||
|
||||
inputs = keras.layers.Input(shape=[timesteps, embedding_dim])
|
||||
gru_layer = keras.layers.GRU(units,
|
||||
|
@ -38,6 +38,7 @@ from tensorflow.python.keras import keras_parameterized
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.layers import recurrent as rnn_v1
|
||||
from tensorflow.python.keras.layers import recurrent_v2 as rnn
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import control_flow_ops
|
||||
from tensorflow.python.ops import gen_math_ops
|
||||
@ -91,7 +92,7 @@ class GRUV2Test(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(timestep, input_shape),
|
||||
num_classes=output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train, output_shape)
|
||||
y_train = np_utils.to_categorical(y_train, output_shape)
|
||||
|
||||
layer = rnn.GRU(rnn_state_size)
|
||||
|
||||
@ -150,7 +151,7 @@ class GRUV2Test(keras_parameterized.TestCase):
|
||||
input_shape=(timestep, input_shape),
|
||||
num_classes=rnn_state_size,
|
||||
random_seed=random_seed.DEFAULT_GRAPH_SEED)
|
||||
y_train = keras.utils.to_categorical(y_train, rnn_state_size)
|
||||
y_train = np_utils.to_categorical(y_train, rnn_state_size)
|
||||
# For the last batch item of the test data, we filter out the last
|
||||
# timestep to simulate the variable length sequence and masking test.
|
||||
x_train[-2:, -1, :] = 0.0
|
||||
@ -619,7 +620,7 @@ class GRUGraphRewriteTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(self.timestep, self.input_shape),
|
||||
num_classes=self.output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train, self.output_shape)
|
||||
y_train = np_utils.to_categorical(y_train, self.output_shape)
|
||||
|
||||
model.compile(
|
||||
optimizer='sgd',
|
||||
@ -679,7 +680,7 @@ class GRUGraphRewriteTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(self.timestep, self.input_shape),
|
||||
num_classes=self.output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train, self.output_shape)
|
||||
y_train = np_utils.to_categorical(y_train, self.output_shape)
|
||||
|
||||
model.compile(
|
||||
optimizer='sgd',
|
||||
|
@ -39,6 +39,7 @@ from tensorflow.python.keras import keras_parameterized
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.layers import recurrent as rnn_v1
|
||||
from tensorflow.python.keras.layers import recurrent_v2 as rnn
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import control_flow_ops
|
||||
from tensorflow.python.ops import gen_math_ops
|
||||
@ -323,7 +324,7 @@ class LSTMV2Test(keras_parameterized.TestCase):
|
||||
input_shape=(timestep, input_shape),
|
||||
num_classes=rnn_state_size,
|
||||
random_seed=random_seed.DEFAULT_GRAPH_SEED)
|
||||
y_train = keras.utils.to_categorical(y_train, rnn_state_size)
|
||||
y_train = np_utils.to_categorical(y_train, rnn_state_size)
|
||||
# For the last batch item of the test data, we filter out the last
|
||||
# timestep to simulate the variable length sequence and masking test.
|
||||
x_train[-2:, -1, :] = 0.0
|
||||
@ -465,7 +466,7 @@ class LSTMV2Test(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(timestep, input_shape),
|
||||
num_classes=output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train, output_shape)
|
||||
y_train = np_utils.to_categorical(y_train, output_shape)
|
||||
|
||||
layer = rnn.LSTM(rnn_state_size)
|
||||
|
||||
@ -799,7 +800,7 @@ class LSTMGraphRewriteTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(self.timestep, self.input_shape),
|
||||
num_classes=self.output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train, self.output_shape)
|
||||
y_train = np_utils.to_categorical(y_train, self.output_shape)
|
||||
|
||||
model.compile(
|
||||
optimizer='sgd',
|
||||
@ -860,7 +861,7 @@ class LSTMGraphRewriteTest(keras_parameterized.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(self.timestep, self.input_shape),
|
||||
num_classes=self.output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train, self.output_shape)
|
||||
y_train = np_utils.to_categorical(y_train, self.output_shape)
|
||||
|
||||
model.compile(
|
||||
optimizer='sgd',
|
||||
@ -1020,7 +1021,7 @@ class LSTMPerformanceTest(test.Benchmark):
|
||||
test_samples=0,
|
||||
input_shape=(test_config['timestep'], test_config['input_shape']),
|
||||
num_classes=test_config['output_shape'])
|
||||
y_train = keras.utils.to_categorical(y_train, test_config['output_shape'])
|
||||
y_train = np_utils.to_categorical(y_train, test_config['output_shape'])
|
||||
|
||||
cudnn_sec_per_epoch = self._time_performance_run_cudnn_lstm(
|
||||
test_config, x_train, y_train)
|
||||
|
@ -38,6 +38,7 @@ from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.engine import base_layer_utils
|
||||
from tensorflow.python.keras.layers import recurrent as rnn_v1
|
||||
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
|
||||
from tensorflow.python.keras.utils import generic_utils
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import init_ops
|
||||
from tensorflow.python.ops import math_ops
|
||||
@ -196,7 +197,7 @@ class RNNTest(keras_parameterized.TestCase):
|
||||
y_np = model.predict(x_np)
|
||||
weights = model.get_weights()
|
||||
config = layer.get_config()
|
||||
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
|
||||
with generic_utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
|
||||
layer = keras.layers.RNN.from_config(config)
|
||||
y = layer(x)
|
||||
model = keras.models.Model(x, y)
|
||||
@ -223,7 +224,7 @@ class RNNTest(keras_parameterized.TestCase):
|
||||
y_np = model.predict(x_np)
|
||||
weights = model.get_weights()
|
||||
config = layer.get_config()
|
||||
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
|
||||
with generic_utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
|
||||
layer = keras.layers.RNN.from_config(config)
|
||||
y = layer(x)
|
||||
model = keras.models.Model(x, y)
|
||||
@ -417,7 +418,7 @@ class RNNTest(keras_parameterized.TestCase):
|
||||
weights = model.get_weights()
|
||||
config = layer.get_config()
|
||||
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.RNN.from_config(config.copy())
|
||||
y = layer(x, constants=c)
|
||||
model = keras.models.Model([x, c], y)
|
||||
@ -426,7 +427,7 @@ class RNNTest(keras_parameterized.TestCase):
|
||||
self.assertAllClose(y_np, y_np_2, atol=1e-4)
|
||||
|
||||
# test flat list inputs.
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.RNN.from_config(config.copy())
|
||||
y = layer([x, c])
|
||||
model = keras.models.Model([x, c], y)
|
||||
@ -474,7 +475,7 @@ class RNNTest(keras_parameterized.TestCase):
|
||||
y_np = model.predict([x_np, c_np])
|
||||
weights = model.get_weights()
|
||||
config = layer.get_config()
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.recurrent.RNN.from_config(config.copy())
|
||||
y = layer(x, constants=c)
|
||||
model = keras.models.Model([x, c], y)
|
||||
@ -539,7 +540,7 @@ class RNNTest(keras_parameterized.TestCase):
|
||||
weights = model.get_weights()
|
||||
config = layer.get_config()
|
||||
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.RNN.from_config(config.copy())
|
||||
y = layer(x, initial_state=s, constants=c)
|
||||
model = keras.models.Model([x, s, c], y)
|
||||
@ -553,7 +554,7 @@ class RNNTest(keras_parameterized.TestCase):
|
||||
self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4)
|
||||
|
||||
# test flat list inputs
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.RNN.from_config(config.copy())
|
||||
y = layer([x, s, c])
|
||||
model = keras.models.Model([x, s, c], y)
|
||||
|
@ -32,6 +32,7 @@ from tensorflow.python.keras import keras_parameterized
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.engine import base_layer_utils
|
||||
from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import ResidualWrapper
|
||||
from tensorflow.python.keras.utils import generic_utils
|
||||
from tensorflow.python.ops.array_ops import concat
|
||||
from tensorflow.python.platform import test
|
||||
from tensorflow.python.training.tracking import util as trackable_util
|
||||
@ -685,7 +686,7 @@ class BidirectionalTest(test.TestCase, parameterized.TestCase):
|
||||
c = keras.Input((3,))
|
||||
cell = _RNNCellWithConstants(32, 3)
|
||||
custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
|
||||
y = layer(x, constants=c)
|
||||
model = keras.Model([x, c], y)
|
||||
@ -702,7 +703,7 @@ class BidirectionalTest(test.TestCase, parameterized.TestCase):
|
||||
weights = model.get_weights()
|
||||
config = layer.get_config()
|
||||
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
|
||||
y = layer(x, constants=c)
|
||||
model = keras.Model([x, c], y)
|
||||
@ -711,7 +712,7 @@ class BidirectionalTest(test.TestCase, parameterized.TestCase):
|
||||
self.assertAllClose(y_np, y_np_2, atol=1e-4)
|
||||
|
||||
# Test flat list inputs
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
|
||||
y = layer([x, c])
|
||||
model = keras.Model([x, c], y)
|
||||
@ -728,7 +729,7 @@ class BidirectionalTest(test.TestCase, parameterized.TestCase):
|
||||
s_bac = keras.Input((32,))
|
||||
cell = _RNNCellWithConstants(32, 3)
|
||||
custom_objects = {'_RNNCellWithConstants': _RNNCellWithConstants}
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.Bidirectional(keras.layers.RNN(cell))
|
||||
y = layer(x, initial_state=[s_for, s_bac], constants=c)
|
||||
model = keras.Model([x, s_for, s_bac, c], y)
|
||||
@ -750,7 +751,7 @@ class BidirectionalTest(test.TestCase, parameterized.TestCase):
|
||||
weights = model.get_weights()
|
||||
config = layer.get_config()
|
||||
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
|
||||
y = layer(x, initial_state=[s_for, s_bac], constants=c)
|
||||
model = keras.Model([x, s_for, s_bac, c], y)
|
||||
@ -764,7 +765,7 @@ class BidirectionalTest(test.TestCase, parameterized.TestCase):
|
||||
assert np.mean(y_np - y_np_2_different_s) != 0
|
||||
|
||||
# Test flat list inputs
|
||||
with keras.utils.CustomObjectScope(custom_objects):
|
||||
with generic_utils.CustomObjectScope(custom_objects):
|
||||
layer = keras.layers.Bidirectional.from_config(copy.deepcopy(config))
|
||||
y = layer([x, s_for, s_bac, c])
|
||||
model = keras.Model([x, s_for, s_bac, c], y)
|
||||
|
@ -28,6 +28,7 @@ from tensorflow.python.framework import constant_op
|
||||
from tensorflow.python.framework import dtypes
|
||||
from tensorflow.python.framework import ops
|
||||
from tensorflow.python.framework import test_util
|
||||
from tensorflow.python.keras.utils import generic_utils
|
||||
from tensorflow.python.keras.utils import losses_utils
|
||||
from tensorflow.python.platform import test
|
||||
|
||||
@ -200,10 +201,10 @@ class KerasLossesTest(test.TestCase):
|
||||
|
||||
def test_serializing_loss_class(self):
|
||||
orig_loss_class = _MSEMAELoss(0.3)
|
||||
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
|
||||
with generic_utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
|
||||
serialized = keras.losses.serialize(orig_loss_class)
|
||||
|
||||
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
|
||||
with generic_utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
|
||||
deserialized = keras.losses.deserialize(serialized)
|
||||
assert isinstance(deserialized, _MSEMAELoss)
|
||||
assert deserialized.mse_fraction == 0.3
|
||||
@ -214,7 +215,7 @@ class KerasLossesTest(test.TestCase):
|
||||
model_filename = os.path.join(tmpdir, 'custom_loss.h5')
|
||||
|
||||
with self.cached_session():
|
||||
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
|
||||
with generic_utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
|
||||
loss = _MSEMAELoss(0.3)
|
||||
inputs = keras.layers.Input((2,))
|
||||
outputs = keras.layers.Dense(1, name='model_output')(inputs)
|
||||
@ -227,7 +228,7 @@ class KerasLossesTest(test.TestCase):
|
||||
|
||||
model.save(model_filename)
|
||||
|
||||
with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
|
||||
with generic_utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}):
|
||||
loaded_model = keras.models.load_model(model_filename)
|
||||
loaded_model.predict(np.random.rand(128, 2))
|
||||
|
||||
|
@ -50,6 +50,7 @@ from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
|
||||
from tensorflow.python.keras.optimizer_v2 import nadam
|
||||
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
|
||||
from tensorflow.python.keras.optimizer_v2 import rmsprop
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import clip_ops
|
||||
from tensorflow.python.ops import resource_variable_ops
|
||||
@ -556,7 +557,7 @@ class OptimizerTest(test.TestCase):
|
||||
test_samples=10,
|
||||
input_shape=(input_dim,),
|
||||
num_classes=num_classes)
|
||||
y = keras.utils.to_categorical(y)
|
||||
y = np_utils.to_categorical(y)
|
||||
|
||||
num_hidden = 1
|
||||
model = testing_utils.get_small_sequential_mlp(
|
||||
@ -635,7 +636,7 @@ class OptimizersCompatibilityTest(keras_parameterized.TestCase):
|
||||
test_samples=10,
|
||||
input_shape=(input_dim,),
|
||||
num_classes=num_classes)
|
||||
y = keras.utils.to_categorical(y)
|
||||
y = np_utils.to_categorical(y)
|
||||
|
||||
num_hidden = 5
|
||||
model_v1 = testing_utils.get_small_sequential_mlp(
|
||||
@ -723,7 +724,7 @@ class OptimizersCompatibilityTest(keras_parameterized.TestCase):
|
||||
test_samples=10,
|
||||
input_shape=(input_dim,),
|
||||
num_classes=num_classes)
|
||||
y = keras.utils.to_categorical(y)
|
||||
y = np_utils.to_categorical(y)
|
||||
|
||||
num_hidden = 5
|
||||
model_k_v1 = testing_utils.get_small_sequential_mlp(
|
||||
@ -784,7 +785,7 @@ class OptimizersCompatibilityTest(keras_parameterized.TestCase):
|
||||
test_samples=10,
|
||||
input_shape=(input_dim,),
|
||||
num_classes=num_classes)
|
||||
y = keras.utils.to_categorical(y)
|
||||
y = np_utils.to_categorical(y)
|
||||
|
||||
num_hidden = 5
|
||||
model_k_v1 = testing_utils.get_small_sequential_mlp(
|
||||
|
@ -28,6 +28,7 @@ from tensorflow.python.eager import context
|
||||
from tensorflow.python.framework import ops
|
||||
from tensorflow.python.keras import keras_parameterized
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.platform import test
|
||||
from tensorflow.python.training.adam import AdamOptimizer
|
||||
|
||||
@ -54,7 +55,7 @@ class KerasOptimizersTest(keras_parameterized.TestCase):
|
||||
np.random.seed(1337)
|
||||
(x_train, y_train), _ = testing_utils.get_test_data(
|
||||
train_samples=1000, test_samples=200, input_shape=(10,), num_classes=2)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
model = _get_model(x_train.shape[1], 20, y_train.shape[1])
|
||||
model.compile(
|
||||
loss='categorical_crossentropy',
|
||||
|
@@ -18,15 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import keras_preprocessing
-
-from tensorflow.python.keras import backend
-from tensorflow.python.keras import utils
-
-# This exists for compatibility with prior version of keras_preprocessing.
-# TODO(fchollet): remove in the future.
-keras_preprocessing.set_keras_submodules(backend=backend, utils=utils)
-
 from tensorflow.python.keras.preprocessing import image
 from tensorflow.python.keras.preprocessing import sequence
 from tensorflow.python.keras.preprocessing import text

@@ -28,7 +28,7 @@ except ImportError:
   pass

 from tensorflow.python.keras import backend
-from tensorflow.python.keras import utils
+from tensorflow.python.keras.utils import data_utils
 from tensorflow.python.util import tf_inspect
 from tensorflow.python.util.tf_export import keras_export

@@ -132,7 +132,7 @@ def save_img(path,


 @keras_export('keras.preprocessing.image.Iterator')
-class Iterator(image.Iterator, utils.Sequence):
+class Iterator(image.Iterator, data_utils.Sequence):
   pass

@@ -21,7 +21,7 @@ from __future__ import print_function

 from keras_preprocessing import sequence

-from tensorflow.python.keras import utils
+from tensorflow.python.keras.utils import data_utils
 from tensorflow.python.util.tf_export import keras_export

 pad_sequences = sequence.pad_sequences
@@ -32,7 +32,7 @@ _remove_long_seq = sequence._remove_long_seq  # pylint: disable=protected-access


 @keras_export('keras.preprocessing.sequence.TimeseriesGenerator')
-class TimeseriesGenerator(sequence.TimeseriesGenerator, utils.Sequence):
+class TimeseriesGenerator(sequence.TimeseriesGenerator, data_utils.Sequence):
   """Utility class for generating batches of temporal data.
   This class takes in a sequence of data-points gathered at
   equal intervals, along with time series parameters such as

@ -26,6 +26,7 @@ from tensorflow.python.eager import context
|
||||
from tensorflow.python.keras import keras_parameterized
|
||||
from tensorflow.python.keras import regularizers
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.ops import math_ops
|
||||
from tensorflow.python.platform import test
|
||||
|
||||
@ -51,8 +52,8 @@ class KerasRegularizersTest(keras_parameterized.TestCase,
|
||||
test_samples=10,
|
||||
input_shape=(DATA_DIM,),
|
||||
num_classes=NUM_CLASSES)
|
||||
y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
|
||||
y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
|
||||
y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
|
||||
y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
|
||||
return (x_train, y_train), (x_test, y_test)
|
||||
|
||||
def create_multi_input_model_from(self, layer1, layer2):
|
||||
|
@@ -1,46 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Keras utilities."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer
-from tensorflow.python.keras.utils.data_utils import get_file
-from tensorflow.python.keras.utils.data_utils import OrderedEnqueuer
-from tensorflow.python.keras.utils.data_utils import Sequence
-from tensorflow.python.keras.utils.data_utils import SequenceEnqueuer
-from tensorflow.python.keras.utils.generic_utils import class_and_config_for_serialized_keras_object
-from tensorflow.python.keras.utils.generic_utils import custom_object_scope
-from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
-from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
-from tensorflow.python.keras.utils.generic_utils import get_custom_objects
-from tensorflow.python.keras.utils.generic_utils import Progbar
-from tensorflow.python.keras.utils.generic_utils import serialize_keras_class_and_config
-from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
-from tensorflow.python.keras.utils.io_utils import HDF5Matrix
-from tensorflow.python.keras.utils.layer_utils import convert_all_kernels_in_model
-from tensorflow.python.keras.utils.layer_utils import get_source_inputs
-from tensorflow.python.keras.utils.layer_utils import print_summary
-from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
-from tensorflow.python.keras.utils.np_utils import normalize
-from tensorflow.python.keras.utils.np_utils import to_categorical
-from tensorflow.python.keras.utils.vis_utils import model_to_dot
-from tensorflow.python.keras.utils.vis_utils import plot_model
-
-del absolute_import
-del division
-del print_function

tensorflow/python/keras/utils/all_utils.py (new file, 48 lines)
@@ -0,0 +1,48 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
"""Public Keras utilities.
|
||||
|
||||
This module is used as a shortcut to access all the symbols. Those symbols was
|
||||
exposed under __init__, and was causing some hourglass import issue.
|
||||
"""
|
||||
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+# pylint: disable=unused-import
+from tensorflow.python.keras.utils.data_utils import GeneratorEnqueuer
+from tensorflow.python.keras.utils.data_utils import get_file
+from tensorflow.python.keras.utils.data_utils import OrderedEnqueuer
+from tensorflow.python.keras.utils.data_utils import Sequence
+from tensorflow.python.keras.utils.data_utils import SequenceEnqueuer
+from tensorflow.python.keras.utils.generic_utils import custom_object_scope
+from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
+from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
+from tensorflow.python.keras.utils.generic_utils import get_custom_objects
+from tensorflow.python.keras.utils.generic_utils import Progbar
+from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
+from tensorflow.python.keras.utils.io_utils import HDF5Matrix
+from tensorflow.python.keras.utils.layer_utils import convert_all_kernels_in_model
+from tensorflow.python.keras.utils.layer_utils import get_source_inputs
+from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
+from tensorflow.python.keras.utils.np_utils import normalize
+from tensorflow.python.keras.utils.np_utils import to_categorical
+from tensorflow.python.keras.utils.vis_utils import model_to_dot
+from tensorflow.python.keras.utils.vis_utils import plot_model
+
+del absolute_import
+del division
+del print_function

@ -21,6 +21,7 @@ import numpy as np
|
||||
|
||||
from tensorflow.python import data
|
||||
from tensorflow.python import keras
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.platform import test
|
||||
|
||||
|
||||
@ -149,7 +150,7 @@ class TestMultiGPUModel(test.TestCase):
|
||||
x_train = np.random.randint(0, 255, input_shape)
|
||||
y_train = np.random.randint(0, num_classes, (input_shape[0],))
|
||||
|
||||
y_train = keras.utils.to_categorical(y_train, num_classes)
|
||||
y_train = np_utils.to_categorical(y_train, num_classes)
|
||||
|
||||
x_train = x_train.astype('float32')
|
||||
y_train = y_train.astype('float32')
|
||||
|
@ -20,7 +20,7 @@ from __future__ import print_function
|
||||
|
||||
import numpy as np
|
||||
|
||||
from tensorflow.python import keras
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.platform import test
|
||||
|
||||
|
||||
@ -34,7 +34,7 @@ class TestNPUtils(test.TestCase):
|
||||
(3, 2, num_classes)]
|
||||
labels = [np.random.randint(0, num_classes, shape) for shape in shapes]
|
||||
one_hots = [
|
||||
keras.utils.to_categorical(label, num_classes) for label in labels]
|
||||
np_utils.to_categorical(label, num_classes) for label in labels]
|
||||
for label, one_hot, expected_shape in zip(labels,
|
||||
one_hots,
|
||||
expected_shapes):
|
||||
|
@ -36,6 +36,7 @@ from tensorflow.python.framework import tensor_shape
|
||||
from tensorflow.python.framework import test_util
|
||||
from tensorflow.python.keras import testing_utils
|
||||
from tensorflow.python.keras.engine import network as keras_network
|
||||
from tensorflow.python.keras.utils import np_utils
|
||||
from tensorflow.python.layers import base as base_layers
|
||||
from tensorflow.python.ops import array_ops
|
||||
from tensorflow.python.ops import control_flow_ops
|
||||
@ -372,7 +373,7 @@ class RNNTest(test.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(timestep, input_shape),
|
||||
num_classes=output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
cell = keras.layers.SimpleRNNCell(output_shape)
|
||||
|
||||
inputs = array_ops.placeholder(
|
||||
@ -406,7 +407,7 @@ class RNNTest(test.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(timestep, input_shape),
|
||||
num_classes=output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
cell = keras.layers.GRUCell(output_shape)
|
||||
|
||||
inputs = array_ops.placeholder(
|
||||
@ -440,7 +441,7 @@ class RNNTest(test.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(timestep, input_shape),
|
||||
num_classes=output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
cell = keras.layers.LSTMCell(output_shape)
|
||||
|
||||
inputs = array_ops.placeholder(
|
||||
@ -478,7 +479,7 @@ class RNNTest(test.TestCase):
|
||||
test_samples=0,
|
||||
input_shape=(timestep, input_shape),
|
||||
num_classes=output_shape)
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
cell = keras.layers.StackedRNNCells(
|
||||
[keras.layers.LSTMCell(2 * output_shape),
|
||||
keras.layers.LSTMCell(output_shape)])
|
||||
@ -523,7 +524,7 @@ class RNNTest(test.TestCase):
|
||||
input_shape=(timestep, input_shape),
|
||||
num_classes=output_shape)
|
||||
x_train = np.transpose(x_train, (1, 0, 2))
|
||||
y_train = keras.utils.to_categorical(y_train)
|
||||
y_train = np_utils.to_categorical(y_train)
|
||||
cell = keras.layers.SimpleRNNCell(output_shape)
|
||||
|
||||
inputs = [array_ops.placeholder(