Merge pull request #35891 from ROCmSoftwarePlatform:google_upstream_resnet50

PiperOrigin-RevId: 291010290
Change-Id: I59831d2a93bad687473db9899f4ee7ffd13078bd
commit 82273f00d5
@@ -35,7 +35,7 @@ def _forward_over_back_hvp(model, images, labels, vector):
       model.trainable_variables, vector) as acc:
     with tf.GradientTape() as grad_tape:
       logits = model(images, training=True)
-      loss = tf.losses.softmax_cross_entropy(
+      loss = tf.compat.v1.losses.softmax_cross_entropy(
           logits=logits, onehot_labels=labels)
     grads = grad_tape.gradient(loss, model.trainable_variables)
   return acc.jvp(grads)
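
For context, the hunk above computes a Hessian-vector product by pushing forward-mode tangents through a reverse-mode gradient. A minimal self-contained sketch using the public tf.autodiff.ForwardAccumulator API (which mirrors the internal forwardprop module used in the test); the toy model, data, and tangent vector here are illustrative stand-ins, not part of the diff:

import tensorflow as tf

# Toy stand-ins for the benchmark's ResNet50, images, labels, vector.
model = tf.keras.Sequential([tf.keras.layers.Dense(3)])
model.build((4, 5))
images = tf.random.uniform((4, 5))
labels = tf.one_hot(tf.random.uniform([4], maxval=3, dtype=tf.int32), 3)
vector = [tf.ones_like(v) for v in model.trainable_variables]

# Forward-over-back: a JVP pushed through the backward-pass gradients.
with tf.autodiff.ForwardAccumulator(model.trainable_variables,
                                    vector) as acc:
  with tf.GradientTape() as grad_tape:
    logits = model(images, training=True)
    loss = tf.compat.v1.losses.softmax_cross_entropy(
        logits=logits, onehot_labels=labels)
  grads = grad_tape.gradient(loss, model.trainable_variables)
hvp = acc.jvp(grads)  # same structure as model.trainable_variables
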
@@ -47,7 +47,7 @@ def _back_over_forward_hvp(model, images, labels, vector):
     with forwardprop.ForwardAccumulator(
         model.trainable_variables, vector) as acc:
       logits = model(images, training=True)
-      loss = tf.losses.softmax_cross_entropy(
+      loss = tf.compat.v1.losses.softmax_cross_entropy(
           logits=logits, onehot_labels=labels)
   return grad_tape.gradient(acc.jvp(loss), model.trainable_variables)
 
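The back-over-forward variant reverses the nesting: the forward accumulator runs inside a gradient tape, and the tape then differentiates the scalar JVP. A hedged sketch reusing the toy model, images, labels, and vector from the sketch above:

with tf.GradientTape() as grad_tape:
  with tf.autodiff.ForwardAccumulator(model.trainable_variables,
                                      vector) as acc:
    logits = model(images, training=True)
    loss = tf.compat.v1.losses.softmax_cross_entropy(
        logits=logits, onehot_labels=labels)
# acc.jvp(loss) is a scalar; its gradient is the Hessian-vector product.
hvp = grad_tape.gradient(acc.jvp(loss), model.trainable_variables)
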
@@ -55,7 +55,7 @@ def _back_over_forward_hvp(model, images, labels, vector):
 def _tf_gradients_forward_over_back_hvp(model, images, labels, vector):
   with tf.GradientTape() as grad_tape:
     logits = model(images, training=True)
-    loss = tf.losses.softmax_cross_entropy(
+    loss = tf.compat.v1.losses.softmax_cross_entropy(
         logits=logits, onehot_labels=labels)
   variables = model.trainable_variables
   grads = grad_tape.gradient(loss, variables)
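
The tf.gradients variant above continues (outside the shown hunk) with the classic double-vjp trick: differentiate the gradient against a dummy cotangent, then differentiate that result back through the dummy, which recovers a forward-mode JVP from two reverse-mode passes. A hedged, self-contained sketch of the identity on a toy scalar loss; the function name and the @tf.function wrapper are assumptions, needed because tf.gradients only works in graph mode:

import tensorflow as tf

@tf.function
def hvp_via_double_vjp(x, vector):
  # Toy scalar loss standing in for the model's cross-entropy.
  loss = tf.reduce_sum(x ** 3)
  grads = tf.gradients(loss, [x])                  # reverse-mode gradient
  helpers = [tf.ones_like(g) for g in grads]       # dummy cotangents
  transposing = tf.gradients(grads, [x], helpers)  # vjp of the gradient
  # Differentiating back through the dummy cotangents with grad_ys set
  # to `vector` yields H @ vector: a JVP from two reverse-mode passes.
  return tf.gradients(transposing, helpers, [vector])

x = tf.constant([1.0, 2.0])
v = tf.constant([1.0, 0.0])
print(hvp_via_double_vjp(x, v))  # Hessian is diag(6 * x) -> [6.0, 0.0]
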
@@ -68,7 +68,7 @@ def _back_over_back_hvp(model, images, labels, vector):
   with tf.GradientTape() as outer_tape:
     with tf.GradientTape() as inner_tape:
       logits = model(images, training=True)
-      loss = tf.losses.softmax_cross_entropy(
+      loss = tf.compat.v1.losses.softmax_cross_entropy(
           logits=logits, onehot_labels=labels)
     grads = inner_tape.gradient(loss, model.trainable_variables)
   return outer_tape.gradient(
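Back-over-back needs no forward accumulator at all: the inner tape produces the gradient, and the outer tape contracts it with the vector via output_gradients. A sketch reusing the same toy setup as the first example:

with tf.GradientTape() as outer_tape:
  with tf.GradientTape() as inner_tape:
    logits = model(images, training=True)
    loss = tf.compat.v1.losses.softmax_cross_entropy(
        logits=logits, onehot_labels=labels)
  grads = inner_tape.gradient(loss, model.trainable_variables)
# output_gradients contracts grads with `vector`, giving the HVP.
hvp = outer_tape.gradient(grads, model.trainable_variables,
                          output_gradients=vector)
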
@@ -109,7 +109,7 @@ class ResNet50Benchmarks(tf.test.Benchmark):
 
       model = resnet50.ResNet50(data_format())
       logits = model(images, training=True)
-      loss = tf.losses.softmax_cross_entropy(
+      loss = tf.compat.v1.losses.softmax_cross_entropy(
           logits=logits, onehot_labels=labels)
       optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
       train_op = optimizer.minimize(loss)
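This hunk is the graph-mode benchmark: the loss feeds a TF1 GradientDescentOptimizer whose minimize() builds a train_op to run in a session. A hedged sketch of that surrounding pattern, meant as a standalone script since it disables eager execution; a dense layer stands in for the benchmark's resnet50.ResNet50 and its input pipeline:

import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

with tf1.Graph().as_default():
  images = tf1.placeholder(tf1.float32, [None, 8])
  labels = tf1.placeholder(tf1.float32, [None, 10])
  logits = tf1.layers.dense(images, 10)  # stand-in for ResNet50
  loss = tf1.losses.softmax_cross_entropy(
      logits=logits, onehot_labels=labels)
  optimizer = tf1.train.GradientDescentOptimizer(learning_rate=1.0)
  train_op = optimizer.minimize(loss)

  with tf1.Session() as sess:
    sess.run(tf1.global_variables_initializer())
    sess.run(train_op, feed_dict={images: [[0.0] * 8],
                                  labels: [[1.0] + [0.0] * 9]})
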
@@ -36,7 +36,7 @@ from tensorflow.python.eager.benchmarks.resnet50 import resnet50_test_util
 def compute_gradients(model, images, labels, num_replicas=1):
   with tf.GradientTape() as grad_tape:
     logits = model(images, training=True)
-    loss = tf.losses.softmax_cross_entropy(
+    loss = tf.compat.v1.losses.softmax_cross_entropy(
         logits=logits, onehot_labels=labels)
     tf.compat.v2.summary.write('loss', loss)
     if num_replicas != 1:
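A hedged sketch of the full compute_gradients pattern; the body after `if num_replicas != 1:` is not shown in the hunk, so the replica averaging and the return statement below are assumptions about the unseen code:

def compute_gradients(model, images, labels, num_replicas=1):
  with tf.GradientTape() as grad_tape:
    logits = model(images, training=True)
    loss = tf.compat.v1.losses.softmax_cross_entropy(
        logits=logits, onehot_labels=labels)
    # No-op unless a default writer and step are active, e.g. via
    # tf.summary.create_file_writer(...).as_default(step=0).
    tf.compat.v2.summary.write('loss', loss)
    if num_replicas != 1:
      loss /= num_replicas  # assumed replica averaging
  return grad_tape.gradient(loss, model.trainable_variables)
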
@@ -35,9 +35,11 @@ def random_batch(batch_size, data_format):
     shape = (batch_size,) + shape
 
   num_classes = 1000
-  images = tf.random_uniform(shape)
-  labels = tf.random_uniform(
-      [batch_size], minval=0, maxval=num_classes, dtype=tf.int32)
+  images = tf.random.uniform(shape)
+  labels = tf.random.uniform([batch_size],
+                             minval=0,
+                             maxval=num_classes,
+                             dtype=tf.int32)
   one_hot = tf.one_hot(labels, num_classes)
 
   return images, one_hot
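The last hunk is purely the TF2 endpoint rename: tf.random_uniform became tf.random.uniform with identical semantics (the reflow of the labels call is just formatting). A quick illustration with the same arguments, batch size 2 assumed:

images = tf.random.uniform((2, 224, 224, 3))    # floats in [0, 1)
labels = tf.random.uniform([2], minval=0, maxval=1000,
                           dtype=tf.int32)      # ints in [0, 1000)
one_hot = tf.one_hot(labels, 1000)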