Merge pull request #46221 from ROCmSoftwarePlatform:google_upstream_rocm_raise_cap_2GB

PiperOrigin-RevId: 350549950
Change-Id: I3149989eb89ad572a054906e16f780f8aec8b526
This commit is contained in:
TensorFlower Gardener 2021-01-07 06:52:34 -08:00
commit 14246bd028
2 changed files with 2 additions and 8 deletions

View File

@@ -110,7 +110,6 @@ cuda_py_test(
cuda_py_test(
name = "np_interop_test",
srcs = ["np_interop_test.py"],
tags = ["no_rocm"],
deps = [
":numpy",
"//tensorflow:tensorflow_py",

View File

@@ -23,13 +23,8 @@
TF_GPU_COUNT=${TF_GPU_COUNT:-4}
TF_TESTS_PER_GPU=${TF_TESTS_PER_GPU:-8}
# We want to allow running one of the following configs:
# - 4 tests per GPU on k80
# - 8 tests per GPU on p100
# p100 has minimum 12G memory. Therefore, we should limit each test to 1.5G.
# To leave some room in case we want to run more tests in parallel in the
# future and to use a rounder number, we set it to 1G.
export TF_PER_DEVICE_MEMORY_LIMIT_MB=${TF_PER_DEVICE_MEMORY_LIMIT_MB:-1024}
export TF_PER_DEVICE_MEMORY_LIMIT_MB=${TF_PER_DEVICE_MEMORY_LIMIT_MB:-2048}
# *******************************************************************
# This section of the script is needed to