From e55e45aef70788e66405b4fe79a76e6caed78b11 Mon Sep 17 00:00:00 2001
From: Shanqing Cai
Date: Thu, 19 May 2016 08:05:53 -0800
Subject: [PATCH] Fix a python3 compatibility issue in shell scripts

Change: 122737159
---
 .../tools/ci_build/builds/test_tutorials.sh | 20 ++++++++++----------
 .../dist_test/scripts/dist_mnist_test.sh    |  4 ++--
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/tensorflow/tools/ci_build/builds/test_tutorials.sh b/tensorflow/tools/ci_build/builds/test_tutorials.sh
index a0a78631c3d..c5ed11c6eba 100644
--- a/tensorflow/tools/ci_build/builds/test_tutorials.sh
+++ b/tensorflow/tools/ci_build/builds/test_tutorials.sh
@@ -120,8 +120,8 @@ test_mnist_softmax() {
 
   # Check final accuracy
   FINAL_ACCURACY=$(tail -1 "${LOG_FILE}")
-  if [[ $(python -c "print ${FINAL_ACCURACY}>0.85") != "True" ]] ||
-     [[ $(python -c "print ${FINAL_ACCURACY}<=1.00") != "True" ]]; then
+  if [[ $(python -c "print(${FINAL_ACCURACY}>0.85)") != "True" ]] ||
+     [[ $(python -c "print(${FINAL_ACCURACY}<=1.00)") != "True" ]]; then
     echo "mnist_softmax accuracy check FAILED: "\
 "FINAL_ACCURACY = ${FINAL_ACCURACY}"
     return 1
@@ -150,8 +150,8 @@ test_mnist_with_summaries() {
 
   # Verify final accuracy
   FINAL_ACCURACY=$(tail -1 "${LOG_FILE}" | awk '{print $NF}')
-  if [[ $(python -c "print ${FINAL_ACCURACY}>0.85") != "True" ]] ||
-     [[ $(python -c "print ${FINAL_ACCURACY}<=1.00") != "True" ]]; then
+  if [[ $(python -c "print(${FINAL_ACCURACY}>0.85)") != "True" ]] ||
+     [[ $(python -c "print(${FINAL_ACCURACY}<=1.00)") != "True" ]]; then
     echo "mnist_with_summaries accuracy check FAILED: ${FINAL_ACCURACY}<0.90"
     return 1
   fi
@@ -188,9 +188,9 @@ test_cifar10_train() {
   FINAL_LOSS=$(grep -o "loss = [0-9\.]*" "${LOG_FILE}" | tail -1 | \
     awk '{print $NF}')
 
-  if [[ $(python -c "print ${FINAL_LOSS}<${INIT_LOSS}") != "True" ]] ||
-     [[ $(python -c "print ${INIT_LOSS}>=0") != "True" ]] ||
-     [[ $(python -c "print ${FINAL_LOSS}>=0") != "True" ]]; then
+  if [[ $(python -c "print(${FINAL_LOSS}<${INIT_LOSS})") != "True" ]] ||
+     [[ $(python -c "print(${INIT_LOSS}>=0)") != "True" ]] ||
+     [[ $(python -c "print(${FINAL_LOSS}>=0)") != "True" ]]; then
     echo "cifar10_train loss check FAILED: "\
 "FINAL_LOSS = ${FINAL_LOSS}; INIT_LOSS = ${INIT_LOSS}"
     return 1
@@ -269,9 +269,9 @@ test_ptb_word_lm() {
   echo "INIT_PERPL=${INIT_PERPL}"
   echo "FINAL_PERPL=${FINAL_PERPL}"
 
-  if [[ $(python -c "print ${FINAL_PERPL}<${INIT_PERPL}") != "True" ]] ||
-     [[ $(python -c "print ${INIT_PERPL}>=0") != "True" ]] ||
-     [[ $(python -c "print ${FINAL_PERPL}>=0") != "True" ]]; then
+  if [[ $(python -c "print(${FINAL_PERPL}<${INIT_PERPL})") != "True" ]] ||
+     [[ $(python -c "print(${INIT_PERPL}>=0)") != "True" ]] ||
+     [[ $(python -c "print(${FINAL_PERPL}>=0)") != "True" ]]; then
     echo "ptb_word_lm perplexity check FAILED: "\
 "FINAL_PERPL = ${FINAL_PERPL}; INIT_PERPL = ${INIT_PERPL}"
     return 1
diff --git a/tensorflow/tools/dist_test/scripts/dist_mnist_test.sh b/tensorflow/tools/dist_test/scripts/dist_mnist_test.sh
index 1401415aa09..fa0cdb20016 100755
--- a/tensorflow/tools/dist_test/scripts/dist_mnist_test.sh
+++ b/tensorflow/tools/dist_test/scripts/dist_mnist_test.sh
@@ -188,9 +188,9 @@ VAL_XENT=$(get_final_val_xent "${WKR_LOG_PREFIX}0.log")
 # Sanity check on the validation entropies
 # TODO(cais): In addition to this basic sanity check, we could run the training
 # with 1 and 2 workers, each for a few times and use scipy.stats to do a t-test
-# to verify tha tthe 2-worker training gives significantly lower final cross
+# to verify that the 2-worker training gives significantly lower final cross
 # entropy
 echo "Final validation cross entropy from worker0: ${VAL_XENT}"
-if [[ $(python -c "print ${VAL_XENT}>0") != "True" ]]; then
+if [[ $(python -c "print(${VAL_XENT}>0)") != "True" ]]; then
   die "Sanity checks on the final validation cross entropy values FAILED"
 fi
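
Note: the change itself is mechanical. Under Python 2, python -c "print 0.9>0.85" prints "True", but under Python 3 the print statement is a SyntaxError, the command substitution comes back empty, and every != "True" test in these scripts reports failure even when the metric is fine. Wrapping the expression in parentheses, print(0.9>0.85), parses on both interpreters and prints the same "True"/"False". A minimal sketch of the pattern follows; the assert_in_range helper is illustrative only and is not part of either script in this patch:

    #!/usr/bin/env bash
    # Hypothetical helper showing the py2/py3-compatible check pattern:
    # "print(EXPR)" is valid syntax under both python2 and python3,
    # whereas "print EXPR" is a SyntaxError under python3.
    assert_in_range() {
      local value="$1" lo="$2" hi="$3"
      if [[ $(python -c "print(${value}>${lo})") != "True" ]] ||
         [[ $(python -c "print(${value}<=${hi})") != "True" ]]; then
        echo "Range check FAILED: value=${value}, expected (${lo}, ${hi}]"
        return 1
      fi
    }

    # Usage, e.g. for an accuracy check like mnist_softmax:
    #   assert_in_range "${FINAL_ACCURACY}" 0.85 1.00 || exit 1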