From d131c5d35b9ef01b6cc5776ca6b0725f83ad35b6 Mon Sep 17 00:00:00 2001 From: a7744hsc Date: Fri, 4 Nov 2016 01:55:27 +0800 Subject: [PATCH] Update the misleading comment for cifar10.py's softmax_linear layer (#5259) * Update the misleading comment for cifar10.py A fix for issue #5251; makes the comment more meaningful. * Update the comment to be a bit more precise. * Wrap to 80 columns. --- tensorflow/models/image/cifar10/cifar10.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tensorflow/models/image/cifar10/cifar10.py b/tensorflow/models/image/cifar10/cifar10.py index fb3a42cbb13..7df2149d40d 100644 --- a/tensorflow/models/image/cifar10/cifar10.py +++ b/tensorflow/models/image/cifar10/cifar10.py @@ -256,7 +256,10 @@ def inference(images): local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name) _activation_summary(local4) - # softmax, i.e. softmax(WX + b) + # Linear layer (WX + b). + # We don't apply softmax here because + # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits + # and performs the softmax internally for efficiency. with tf.variable_scope('softmax_linear') as scope: weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=1/192.0, wd=0.0)