This breaks multi-python:
The local gen_build_info rule calls into find_cuda_config, which only works in the remote image. This is additionally brittle: relying on TF_CUDA_VERSION being an action_env poisons our caches, and running find_cuda_config multiple times is bug-prone. I think the better way to do this is to put the information from the repo rule into a file template as part of the repo rule configuration (cuda_configure.bzl). Then we can just include that file, instead of trying to do that as part of the action.

PiperOrigin-RevId: 311148754
Change-Id: I80daa8652a85b2a1897c15117e6422bfd21cee6a
Parent: d2bc2b66a3
Commit: 2ffde8a339
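For orientation, a minimal sketch of the access pattern this rollback restores (illustrative only, not part of the commit; it assumes the generated tensorflow.python.platform.build_info module exposes flat attributes, as the hunks below show):

    from tensorflow.python.platform import build_info

    # Flat module-level attributes instead of a build_info.build_info dict.
    if build_info.is_cuda_build:
      print("TensorFlow was built against CUDA")
    if build_info.is_rocm_build:
      print("TensorFlow was built against ROCm")

    # Windows-only fields are only generated on Windows builds, hence the
    # hasattr() probe restored in self_check.py below.
    if hasattr(build_info, "msvcp_dll_names"):
      print(build_info.msvcp_dll_names.split(","))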
@@ -264,7 +264,6 @@ py_library(
     deps = [
         ":_pywrap_util_port",
         ":lib",
-        ":platform_build_info",
         ":pywrap_tfe",
         ":util",
         "//tensorflow/core:protos_all_py",
@@ -329,24 +328,6 @@ tf_py_test(
     ],
 )

-tf_py_test(
-    name = "sysconfig_test",
-    size = "small",
-    srcs = ["platform/sysconfig_test.py"],
-    data = [
-        "platform/sysconfig.py",
-    ],
-    python_version = "PY3",
-    tags = [
-        "no_pip",
-        "no_windows",
-    ],
-    deps = [
-        ":platform",
-        ":platform_test",
-    ],
-)
-
 tf_py_test(
     name = "flags_test",
     size = "small",
@@ -601,7 +601,7 @@ def gpu_gru(inputs, init_h, kernel, recurrent_kernel, bias, mask, time_major,
   # (6 * units)
   bias = array_ops.split(K.flatten(bias), 6)

-  if build_info.build_info['is_cuda_build']:
+  if build_info.is_cuda_build:
     # Note that the gate order for CuDNN is different from the canonical format.
     # canonical format is [z, r, h], whereas CuDNN is [r, z, h]. The swap need
     # to be done for kernel, recurrent_kernel, input_bias, recurrent_bias.
@@ -1361,7 +1361,7 @@ def gpu_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, mask,
   # so that mathematically it is same as the canonical LSTM implementation.
   full_bias = array_ops.concat((array_ops.zeros_like(bias), bias), 0)

-  if build_info.build_info['is_rocm_build']:
+  if build_info.is_rocm_build:
     # ROCm MIOpen's weight sequence for LSTM is different from both canonical
     # and Cudnn format
     # MIOpen: [i, f, o, c] Cudnn/Canonical: [i, f, c, o]
@@ -25,10 +25,8 @@ from tensorflow.python.platform import test
 class BuildInfoTest(test.TestCase):

   def testBuildInfo(self):
-    self.assertEqual(build_info.build_info['is_rocm_build'],
-                     test.is_built_with_rocm())
-    self.assertEqual(build_info.build_info['is_cuda_build'],
-                     test.is_built_with_cuda())
+    self.assertEqual(build_info.is_rocm_build, test.is_built_with_rocm())
+    self.assertEqual(build_info.is_cuda_build, test.is_built_with_cuda())


 if __name__ == '__main__':
@@ -20,7 +20,6 @@ from __future__ import print_function

 import os

-MSVCP_DLL_NAMES = "msvcp_dll_names"

 try:
   from tensorflow.python.platform import build_info
@@ -43,9 +42,9 @@ def preload_check():
     # we load the Python extension, so that we can raise an actionable error
     # message if they are not found.
     import ctypes  # pylint: disable=g-import-not-at-top
-    if MSVCP_DLL_NAMES in build_info.build_info:
+    if hasattr(build_info, "msvcp_dll_names"):
       missing = []
-      for dll_name in build_info.build_info[MSVCP_DLL_NAMES].split(","):
+      for dll_name in build_info.msvcp_dll_names.split(","):
         try:
           ctypes.WinDLL(dll_name)
         except OSError:
@@ -24,7 +24,6 @@ import platform as _platform
 from tensorflow.python.framework.versions import CXX11_ABI_FLAG as _CXX11_ABI_FLAG
 from tensorflow.python.framework.versions import MONOLITHIC_BUILD as _MONOLITHIC_BUILD
 from tensorflow.python.framework.versions import VERSION as _VERSION
-from tensorflow.python.platform import build_info
 from tensorflow.python.util.tf_export import tf_export


@@ -85,30 +84,3 @@ def get_link_flags():
   else:
     flags.append('-l:libtensorflow_framework.so.%s' % ver)
   return flags
-
-
-@tf_export('sysconfig.get_build_info')
-def get_build_info():
-  """Get a dictionary describing TensorFlow's build environment.
-
-  Values are generated when TensorFlow is compiled, and are static for each
-  TensorFlow package. The return value is a dictionary with string keys such as:
-
-    - cuda_version
-    - cudnn_version
-    - tensorrt_version
-    - nccl_version
-    - is_cuda_build
-    - is_rocm_build
-    - msvcp_dll_names
-    - nvcuda_dll_name
-    - cudart_dll_name
-    - cudnn_dll_name
-
-  Note that the actual keys and values returned by this function is subject to
-  change across different versions of TensorFlow or across platforms.
-
-  Returns:
-    A Dictionary describing TensorFlow's build environment.
-  """
-  return build_info.build_info
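For reference, the public API removed just above is the dictionary-returning tf.sysconfig.get_build_info(); a caller used it roughly as the deleted test below does (illustrative only, the function no longer exists once this rollback lands):

    import tensorflow as tf

    info = tf.sysconfig.get_build_info()  # plain dict of build facts
    print(info.get('is_cuda_build'), info.get('cuda_version'))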
@@ -1,38 +0,0 @@
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.python.platform import googletest
-from tensorflow.python.platform import sysconfig
-from tensorflow.python.platform import test
-
-
-class SysconfigTest(googletest.TestCase):
-
-  def test_get_build_info_works(self):
-    build_info = sysconfig.get_build_info()
-    self.assertIsInstance(build_info, dict)
-
-  def test_rocm_cuda_info_matches(self):
-    build_info = sysconfig.get_build_info()
-    self.assertEqual(build_info["is_rocm_build"], test.is_built_with_rocm())
-    self.assertEqual(build_info["is_cuda_build"], test.is_built_with_cuda())
-
-
-if __name__ == "__main__":
-  googletest.main()
@@ -2593,10 +2593,6 @@ def tf_version_info_genrule(name, out):
         arguments = "--generate \"$@\" --git_tag_override=${GIT_TAG_OVERRIDE:-}",
     )

-def dict_to_kv(d):
-    """Convert a dictionary to a space-joined list of key=value pairs."""
-    return " " + " ".join(["%s=%s" % (k, v) for k, v in d.items()])
-
 def tf_py_build_info_genrule(name, out):
     _local_genrule(
         name = name,
@@ -2604,17 +2600,16 @@ def tf_py_build_info_genrule(name, out):
         exec_tool = "//tensorflow/tools/build_info:gen_build_info",
         arguments =
             "--raw_generate \"$@\" " +
-            " --key_value" +
-            " is_rocm_build=" + if_rocm("True", "False") +
-            " is_cuda_build=" + if_cuda("True", "False") +
-            # TODO(angerson) Can we reliably load CUDA compute capabilities here?
-            if_windows(dict_to_kv({
-                "msvcp_dll_names": "msvcp140.dll,msvcp140_1.dll",
-            }), "") + if_windows_cuda(dict_to_kv({
-                "nvcuda_dll_name": "nvcuda.dll",
-                "cudart_dll_name": "cudart64_$$(echo $${TF_CUDA_VERSION:-} | sed \"s/\\.//\").dll",
-                "cudnn_dll_name": "cudnn64_$${TF_CUDNN_VERSION:-}.dll",
-            }), ""),
+            " --is_config_cuda " + if_cuda("True", "False") +
+            " --is_config_rocm " + if_rocm("True", "False") +
+            " --key_value " +
+            if_cuda(" cuda_version_number=${TF_CUDA_VERSION:-} cudnn_version_number=${TF_CUDNN_VERSION:-} ", "") +
+            if_windows(" msvcp_dll_names=msvcp140.dll,msvcp140_1.dll ", "") +
+            if_windows_cuda(" ".join([
+                "nvcuda_dll_name=nvcuda.dll",
+                "cudart_dll_name=cudart64_$(echo $${TF_CUDA_VERSION:-} | sed \"s/\\.//\").dll",
+                "cudnn_dll_name=cudnn64_${TF_CUDNN_VERSION:-}.dll",
+            ]), ""),
     )

 def cc_library_with_android_deps(
@@ -8,10 +8,6 @@ tf_module {
     name: "MONOLITHIC_BUILD"
     mtype: "<type \'int\'>"
   }
-  member_method {
-    name: "get_build_info"
-    argspec: "args=[], varargs=None, keywords=None, defaults=None"
-  }
   member_method {
     name: "get_compile_flags"
     argspec: "args=[], varargs=None, keywords=None, defaults=None"
@@ -8,10 +8,6 @@ tf_module {
     name: "MONOLITHIC_BUILD"
     mtype: "<type \'int\'>"
   }
-  member_method {
-    name: "get_build_info"
-    argspec: "args=[], varargs=None, keywords=None, defaults=None"
-  }
   member_method {
     name: "get_compile_flags"
     argspec: "args=[], varargs=None, keywords=None, defaults=None"
@@ -14,7 +14,6 @@ py_binary(
     srcs_version = "PY2AND3",
     tags = ["no-remote-exec"],
     deps = [
-        "//third_party/gpus:find_cuda_config",
         "@six_archive//:six",
     ],
 )
@@ -1,4 +1,4 @@
-# Lint as: python3
+# Lint as: python2, python3
 # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,62 +19,50 @@ from __future__ import division
 from __future__ import print_function

 import argparse
-import os
-import platform
-import sys

 import six

-# CUDA library gathering is only valid in OSS
-try:
-  from third_party.gpus import find_cuda_config  # pylint: disable=g-import-not-at-top
-except ImportError:
-  find_cuda_config = None


-def write_build_info(filename, key_value_list):
+def write_build_info(filename, is_config_cuda, is_config_rocm, key_value_list):
   """Writes a Python that describes the build.

   Args:
     filename: filename to write to.
+    is_config_cuda: Whether this build is using CUDA.
+    is_config_rocm: Whether this build is using ROCm.
     key_value_list: A list of "key=value" strings that will be added to the
-      module's "build_info" dictionary as additional entries.
-  """
-
-  build_info = {}
+      module as additional fields.
+
+  Raises:
+    ValueError: If `key_value_list` includes the key "is_cuda_build", which
+      would clash with one of the default fields.
+  """
+  module_docstring = "\"\"\"Generates a Python module containing information "
+  module_docstring += "about the build.\"\"\""
+
+  build_config_rocm_bool = "False"
+  build_config_cuda_bool = "False"
+
+  if is_config_rocm == "True":
+    build_config_rocm_bool = "True"
+  elif is_config_cuda == "True":
+    build_config_cuda_bool = "True"
+
+  key_value_pair_stmts = []
+  if key_value_list:
     for arg in key_value_list:
       key, value = six.ensure_str(arg).split("=")
-      if value.lower() == "true":
-        build_info[key] = True
-      elif value.lower() == "false":
-        build_info[key] = False
-      else:
-        build_info[key] = value
-
-  # Generate cuda_build_info, a dict describing the CUDA component versions
-  # used to build TensorFlow.
-  if find_cuda_config and build_info.get("is_cuda_build", False):
-    libs = ["_", "cuda", "cudnn"]
-    if platform.system() == "Linux":
-      if os.environ.get("TF_NEED_TENSORRT", "0") == "1":
-        libs.append("tensorrt")
-      if "TF_NCCL_VERSION" in os.environ:
-        libs.append("nccl")
-    # find_cuda_config accepts libraries to inspect as argv from the command
-    # line. We can work around this restriction by setting argv manually
-    # before calling find_cuda_config.
-    backup_argv = sys.argv
-    sys.argv = libs
-    cuda = find_cuda_config.find_cuda_config()
-
-    build_info["cuda_version"] = cuda["cuda_version"]
-    build_info["cudnn_version"] = cuda["cudnn_version"]
-    build_info["tensorrt_version"] = cuda.get("tensorrt_version", None)
-    build_info["nccl_version"] = cuda.get("nccl_version", None)
-    sys.argv = backup_argv
+      if key == "is_cuda_build":
+        raise ValueError("The key \"is_cuda_build\" cannot be passed as one of "
+                         "the --key_value arguments.")
+      if key == "is_rocm_build":
+        raise ValueError("The key \"is_rocm_build\" cannot be passed as one of "
+                         "the --key_value arguments.")
+      key_value_pair_stmts.append("%s = %r" % (key, value))
+  key_value_pair_content = "\n".join(key_value_pair_stmts)

   contents = """
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -88,21 +76,33 @@ def write_build_info(filename, key_value_list):
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-\"\"\"Auto-generated module providing information about the build.\"\"\"
+%s
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from collections import namedtuple
+is_rocm_build = %s
+is_cuda_build = %s

-build_info = {build_info}
-""".format(build_info=build_info)
+%s
+""" % (module_docstring, build_config_rocm_bool, build_config_cuda_bool,
+       key_value_pair_content)
   open(filename, "w").write(contents)


 parser = argparse.ArgumentParser(
     description="""Build info injection into the PIP package.""")

+parser.add_argument(
+    "--is_config_cuda",
+    type=str,
+    help="'True' for CUDA GPU builds, 'False' otherwise.")
+
+parser.add_argument(
+    "--is_config_rocm",
+    type=str,
+    help="'True' for ROCm GPU builds, 'False' otherwise.")
+
 parser.add_argument("--raw_generate", type=str, help="Generate build_info.py")

 parser.add_argument(
@@ -110,7 +110,10 @@ parser.add_argument(

 args = parser.parse_args()

-if args.raw_generate:
-  write_build_info(args.raw_generate, args.key_value)
+if (args.raw_generate is not None) and (args.is_config_cuda is not None) and (
+    args.is_config_rocm is not None):
+  write_build_info(args.raw_generate, args.is_config_cuda, args.is_config_rocm,
+                   args.key_value)
 else:
-  raise RuntimeError("--raw_generate must be used.")
+  raise RuntimeError(
+      "--raw_generate, --is_config_cuda and --is_config_rocm must be used")
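To make the restored generator concrete, here is a hedged sketch of the module it writes; the flag and field names come from the hunks above, while the concrete version numbers are made-up placeholders:

    # Hypothetical invocation (normally assembled by tf_py_build_info_genrule):
    #   python gen_build_info.py --raw_generate build_info.py \
    #       --is_config_cuda True --is_config_rocm False \
    #       --key_value cuda_version_number=10.1 cudnn_version_number=7
    #
    # The generated build_info.py then carries flat module-level fields,
    # roughly (extra keys are emitted with %r, so they arrive as strings):
    is_rocm_build = False
    is_cuda_build = True
    cuda_version_number = '10.1'
    cudnn_version_number = '7'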
@@ -1,4 +1,3 @@
-# lint as: python3
 # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -44,8 +43,6 @@ from setuptools import setup
 from setuptools.command.install import install as InstallCommandBase
 from setuptools.dist import Distribution

-from tensorflow.python.platform import build_info
-
 DOCLINES = __doc__.split('\n')

 # This version string is semver compatible, but incompatible with pip.
@@ -85,22 +82,6 @@ REQUIRED_PACKAGES = [
     'scipy == 1.2.2;python_version<"3"',
 ]

-# Generate a footer describing the CUDA technology this release was built
-# against.
-GPU_DESCRIPTION = ''
-if build_info.build_info['is_cuda_build']:
-  gpu_header = ('\nTensorFlow {} for NVIDIA GPUs was built with these '
-                'platform and library versions:\n\n - ').format(_VERSION)
-  bi = build_info.build_info
-  trt_ver = bi['tensorrt_version']
-  nccl_ver = bi['nccl_version']
-  GPU_DESCRIPTION = gpu_header + '\n - '.join([
-      'NVIDIA CUDA ' + bi['cuda_version'],
-      'NVIDIA cuDNN ' + bi['cudnn_version'],
-      'NVIDIA NCCL ' + 'not enabled' if not nccl_ver else nccl_ver,
-      'NVIDIA TensorRT ' + 'not enabled' if not trt_ver else trt_ver,
-  ])
-
 if sys.byteorder == 'little':
   # grpcio does not build correctly on big-endian machines due to lack of
   # BoringSSL support.
@@ -136,8 +117,7 @@ CONSOLE_SCRIPTS = [
     # even though the command is not removed, just moved to a different wheel.
     'tensorboard = tensorboard.main:run_main',
     'tf_upgrade_v2 = tensorflow.tools.compatibility.tf_upgrade_v2_main:main',
-    'estimator_ckpt_converter = '
-    'tensorflow_estimator.python.estimator.tools.checkpoint_converter:main',
+    'estimator_ckpt_converter = tensorflow_estimator.python.estimator.tools.checkpoint_converter:main',
 ]
 # pylint: enable=line-too-long

@@ -181,9 +161,10 @@ class InstallHeaders(Command):
   """
   description = 'install C/C++ header files'

-  user_options = [
-      ('install-dir=', 'd', 'directory to install header files to'),
-      ('force', 'f', 'force installation (overwrite existing files)'),
+  user_options = [('install-dir=', 'd',
+                   'directory to install header files to'),
+                  ('force', 'f',
+                   'force installation (overwrite existing files)'),
   ]

   boolean_options = ['force']
@@ -194,7 +175,8 @@ class InstallHeaders(Command):
     self.outfiles = []

   def finalize_options(self):
-    self.set_undefined_options('install', ('install_headers', 'install_dir'),
+    self.set_undefined_options('install',
+                               ('install_headers', 'install_dir'),
                                ('force', 'force'))

   def mkdir_and_copy_file(self, header):
@@ -254,7 +236,9 @@ so_lib_paths = [

 matches = []
 for path in so_lib_paths:
-  matches.extend(['../' + x for x in find_files('*', path) if '.py' not in x])
+  matches.extend(
+      ['../' + x for x in find_files('*', path) if '.py' not in x]
+  )

 if os.name == 'nt':
   EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
@@ -274,16 +258,17 @@ headers = (
     list(find_files('*.h', 'tensorflow/stream_executor')) +
     list(find_files('*.h', 'google/com_google_protobuf/src')) +
     list(find_files('*.inc', 'google/com_google_protobuf/src')) +
-    list(find_files('*', 'third_party/eigen3')) +
-    list(find_files('*.h', 'tensorflow/include/external/com_google_absl')) +
-    list(find_files('*.inc', 'tensorflow/include/external/com_google_absl')) +
-    list(find_files('*', 'tensorflow/include/external/eigen_archive')))
+    list(find_files('*', 'third_party/eigen3')) + list(
+        find_files('*.h', 'tensorflow/include/external/com_google_absl')) +
+    list(
+        find_files('*.inc', 'tensorflow/include/external/com_google_absl'))
+    + list(find_files('*', 'tensorflow/include/external/eigen_archive')))

 setup(
     name=project_name,
     version=_VERSION.replace('-', ''),
     description=DOCLINES[0],
-    long_description='\n'.join(DOCLINES[2:]) + GPU_DESCRIPTION,
+    long_description='\n'.join(DOCLINES[2:]),
     url='https://www.tensorflow.org/',
     download_url='https://github.com/tensorflow/tensorflow/tags',
     author='Google Inc.',
@@ -304,11 +289,6 @@ setup(
     ] + matches,
     },
     zip_safe=False,
-    # Accessible with importlib.metadata.metadata('tf-pkg-name').items()
-    platforms=[
-        '{}:{}'.format(key, value)
-        for key, value in build_info.build_info.items()
-    ],
     distclass=BinaryDistribution,
     cmdclass={
         'install_headers': InstallHeaders,
third_party/gpus/BUILD (vendored)
@@ -1,6 +0,0 @@
-# Expose find_cuda_config.py as a library so other tools can reference it.
-py_library(
-    name = "find_cuda_config",
-    srcs = ["find_cuda_config.py"],
-    visibility = ["//visibility:public"],
-)