During non-eager execution there was an inconsistency between the placeholders we created and the actually fed inputs when those inputs were scalars. The standardize_single_array method expands dimensions so that (None,) inputs look like (None, 1), but the placeholder shapes remained the old (None,). PiperOrigin-RevId: 255033566
This commit is contained in:
parent
f3e3f4b784
commit
64a9a35d4e
@ -94,6 +94,45 @@ class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
|
||||
model.evaluate(ds, steps=1)
|
||||
model.predict(ds, steps=1)
|
||||
|
||||
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
|
||||
def test_sequential_model_with_crossed_column(self):
|
||||
feature_columns = []
|
||||
age_buckets = fc.bucketized_column(
|
||||
fc.numeric_column('age'),
|
||||
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
|
||||
feature_columns.append(age_buckets)
|
||||
|
||||
# indicator cols
|
||||
thal = fc.categorical_column_with_vocabulary_list(
|
||||
'thal', ['fixed', 'normal', 'reversible'])
|
||||
|
||||
crossed_feature = fc.crossed_column([age_buckets, thal],
|
||||
hash_bucket_size=1000)
|
||||
crossed_feature = fc.indicator_column(crossed_feature)
|
||||
feature_columns.append(crossed_feature)
|
||||
|
||||
feature_layer = fc.DenseFeatures(feature_columns)
|
||||
|
||||
model = keras.models.Sequential([
|
||||
feature_layer,
|
||||
keras.layers.Dense(128, activation='relu'),
|
||||
keras.layers.Dense(128, activation='relu'),
|
||||
keras.layers.Dense(1, activation='sigmoid')
|
||||
])
|
||||
|
||||
age_data = np.random.randint(10, 100, size=100)
|
||||
thal_data = np.random.choice(['fixed', 'normal', 'reversible'], size=100)
|
||||
inp_x = {'age': age_data, 'thal': thal_data}
|
||||
inp_y = np.random.randint(0, 1, size=100)
|
||||
ds = dataset_ops.Dataset.from_tensor_slices((inp_x, inp_y)).batch(5)
|
||||
model.compile(optimizer='adam',
|
||||
loss='binary_crossentropy',
|
||||
metrics=['accuracy'],)
|
||||
model.fit(ds, epochs=1)
|
||||
model.fit(ds, epochs=1)
|
||||
model.evaluate(ds)
|
||||
model.predict(ds)
|
||||
|
||||
@keras_parameterized.run_all_keras_modes
|
||||
def test_subclassed_model_with_feature_columns(self):
|
||||
col_a = fc.numeric_column('a')
|
||||
|
@ -250,7 +250,7 @@ class TrainingTest(keras_parameterized.TestCase):
|
||||
model = keras.Sequential([ReturnTraining()])
|
||||
model.compile('sgd', 'mse')
|
||||
hist = model.fit(x=np.array([0.]), y=np.array([0.]))
|
||||
self.assertAllClose(hist.history['loss'][0], (10000,))
|
||||
self.assertAllClose(hist.history['loss'][0], 10000)
|
||||
|
||||
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
|
||||
@keras_parameterized.run_all_keras_modes
|
||||
|
@ -1619,12 +1619,16 @@ class ModelInputs(object):
|
||||
# we have. The user should call `model._set_inputs(placeholders)`
|
||||
# to specify custom placeholders if the need arises.
|
||||
shape = (None,) + tuple(v.shape[1:])
|
||||
if shape == (None,):
|
||||
shape = (None, 1)
|
||||
dtype = dtypes.as_dtype(v.dtype)
|
||||
if dtype.is_floating:
|
||||
dtype = K.floatx()
|
||||
v = K.placeholder(shape=shape, name=k, dtype=dtype)
|
||||
elif isinstance(v, tensor_spec.TensorSpec):
|
||||
shape = (None,) + tuple(v.shape.as_list()[1:])
|
||||
if shape == (None,):
|
||||
shape = (None, 1)
|
||||
v = K.placeholder(shape=shape, name=k, dtype=v.dtype)
|
||||
|
||||
self._flattened_inputs[i] = v
|
||||
|
Loading…
x
Reference in New Issue
Block a user