Add a Cython build dependency, start using some Cython tensor utilities
PiperOrigin-RevId: 171166294
commit c49eeeee54 (parent 5f97262ae6)
tensorflow/core/platform/default/build_config.bzl
@@ -28,6 +28,76 @@ def tf_deps(deps, suffix):
  return tf_deps


# Modified from @cython//:Tools/rules.bzl
def pyx_library(
    name,
    deps=[],
    py_deps=[],
    srcs=[],
    **kwargs):
  """Compiles a group of .pyx / .pxd / .py files.

  First runs Cython to create .cpp files for each input .pyx or .py + .pxd
  pair. Then builds a shared object for each, passing "deps" to each cc_binary
  rule (includes Python headers by default). Finally, creates a py_library rule
  with the shared objects and any pure Python "srcs", with py_deps as its
  dependencies; the shared objects can be imported like normal Python files.

  Args:
    name: Name for the rule.
    deps: C/C++ dependencies of the Cython code (e.g. Numpy headers).
    py_deps: Pure Python dependencies of the final library.
    srcs: .py, .pyx, or .pxd files to either compile or pass through.
    **kwargs: Extra keyword arguments passed to the py_library.
  """
  # First, filter the sources into those to compile and those to pass through.
  py_srcs = []
  pyx_srcs = []
  pxd_srcs = []
  for src in srcs:
    if src.endswith(".pyx") or (src.endswith(".py")
                                and src[:-3] + ".pxd" in srcs):
      pyx_srcs.append(src)
    elif src.endswith(".py"):
      py_srcs.append(src)
    else:
      pxd_srcs.append(src)
    # __init__.py files are also made visible to the Cython invocation below.
    if src.endswith("__init__.py"):
      pxd_srcs.append(src)

  # Invoke cython to translate the .pyx sources into .cpp files.
  # PYTHONHASHSEED=0 keeps Python hashing, and thus Cython's generated
  # output, deterministic across builds.
  cpp_outs = [src.split(".")[0] + ".cpp" for src in pyx_srcs]
  native.genrule(
      name = name + "_cython_translation",
      srcs = pyx_srcs,
      outs = cpp_outs,
      cmd = ("PYTHONHASHSEED=0 $(location @cython//:cython_binary) --cplus $(SRCS)"
             # Rename outputs to the expected location.
             + """ && python -c 'import shutil, sys; n = len(sys.argv); [shutil.copyfile(src.split(".")[0] + ".cpp", dst) for src, dst in zip(sys.argv[1:], sys.argv[1+n//2:])]' $(SRCS) $(OUTS)"""),
      tools = ["@cython//:cython_binary"] + pxd_srcs,
  )

  # Build one shared object per translated source.
  shared_objects = []
  for src in pyx_srcs:
    stem = src.split(".")[0]
    shared_object_name = stem + ".so"
    native.cc_binary(
        name=shared_object_name,
        srcs=[stem + ".cpp"],
        deps=deps + ["//util/python:python_headers"],
        linkshared = 1,
    )
    shared_objects.append(shared_object_name)

  # Now create a py_library with these shared objects as data.
  native.py_library(
      name=name,
      srcs=py_srcs,
      deps=py_deps,
      srcs_version = "PY2AND3",
      data=shared_objects,
      **kwargs
  )
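
The python -c one-liner in the genrule's cmd is dense: Cython writes each "<stem>.cpp" next to its input .pyx, and the one-liner copies those files to the output paths Bazel declared. A readable standalone sketch of the same logic (a hypothetical helper script, not part of this commit):

  import shutil
  import sys

  # argv layout as invoked by the genrule: [script, src_1..src_k, out_1..out_k]
  args = sys.argv[1:]
  k = len(args) // 2
  for src, dst in zip(args[:k], args[k:]):
    # Cython left "<stem>.cpp" beside each input; copy it where Bazel expects.
    shutil.copyfile(src.split(".")[0] + ".cpp", dst)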


def _proto_cc_hdrs(srcs, use_grpc_plugin=False):
  ret = [s[:-len(".proto")] + ".pb.h" for s in srcs]

@@ -299,7 +369,6 @@ def tf_additional_proto_srcs():

def tf_additional_all_protos():
  return ["//tensorflow/core:protos_all"]


def tf_protos_all_impl():
  return ["//tensorflow/core:protos_all_cc_impl"]

tensorflow/python/BUILD
@@ -24,6 +24,7 @@ load("//tensorflow:tensorflow.bzl", "tf_py_wrap_cc")
load("//tensorflow:tensorflow.bzl", "tf_cc_shared_object")
load("//tensorflow:tensorflow.bzl", "cuda_py_test")
load("//tensorflow:tensorflow.bzl", "cuda_py_tests")
load("//tensorflow/core:platform/default/build_config.bzl", "pyx_library")
load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")
load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library_py")
load("//tensorflow/core:platform/default/build_config.bzl", "tf_additional_lib_deps")

@@ -503,6 +504,7 @@ py_library(
        ":common_shapes",
        ":cpp_shape_inference_proto_py",
        ":errors",
        ":framework_fast_tensor_util",
        ":framework_for_generated_wrappers",
        ":function",
        ":graph_util",

@@ -733,8 +735,6 @@ py_library(
    ],
)

# load("//third_party/py/cython:build_defs.bzl", "pyx_library")

py_library(
    name = "extra_py_tests_deps",
    srcs_version = "PY2AND3",

@@ -4358,3 +4358,10 @@ py_test(
        "//third_party/py/numpy",
    ],
)

pyx_library(
    name = "framework_fast_tensor_util",
    srcs = ["framework/fast_tensor_util.pyx"],
    py_deps = ["//tensorflow/python:util"],
    deps = ["//third_party/py/numpy:headers"],
)
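
For this target, the classification loop in pyx_library is easy to trace by hand; a plain-Python check (illustrative only) of what the macro would produce:

  # Mirrors the src-filtering and naming logic from build_config.bzl above.
  srcs = ["framework/fast_tensor_util.pyx"]
  pyx_srcs = [s for s in srcs
              if s.endswith(".pyx")
              or (s.endswith(".py") and s[:-3] + ".pxd" in srcs)]
  shared_objects = [s.split(".")[0] + ".so" for s in pyx_srcs]
  print(shared_objects)  # ['framework/fast_tensor_util.so']
  # That .so is attached to the py_library as data, so it imports as
  # tensorflow.python.framework.fast_tensor_util.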

tensorflow/python/framework/fast_tensor_util.pyx (new file, 103 lines)
@@ -0,0 +1,103 @@
#cython: boundscheck=False
#cython: wraparound=False
#cython: infer_types=True
import numpy as np
cimport numpy as np

from tensorflow.python.util import compat


def AppendFloat32ArrayToTensorProto(
    tensor_proto, np.ndarray[np.float32_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.float_val.append(nparray[i])


def AppendFloat64ArrayToTensorProto(
    tensor_proto, np.ndarray[np.float64_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.double_val.append(nparray[i])


def AppendInt32ArrayToTensorProto(
    tensor_proto, np.ndarray[np.int32_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.int_val.append(nparray[i])


def AppendInt64ArrayToTensorProto(
    tensor_proto, np.ndarray[np.int64_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.int64_val.append(nparray[i])


def AppendUInt8ArrayToTensorProto(
    tensor_proto, np.ndarray[np.uint8_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.int_val.append(nparray[i])


def AppendUInt16ArrayToTensorProto(
    tensor_proto, np.ndarray[np.uint16_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.int_val.append(nparray[i])


def AppendInt16ArrayToTensorProto(
    tensor_proto, np.ndarray[np.int16_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.int_val.append(nparray[i])


def AppendInt8ArrayToTensorProto(
    tensor_proto, np.ndarray[np.int8_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.int_val.append(nparray[i])


def AppendComplex64ArrayToTensorProto(
    tensor_proto, np.ndarray[np.complex64_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.scomplex_val.append(nparray[i].real)
    tensor_proto.scomplex_val.append(nparray[i].imag)


def AppendComplex128ArrayToTensorProto(
    tensor_proto, np.ndarray[np.complex128_t, ndim=1] nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.dcomplex_val.append(nparray[i].real)
    tensor_proto.dcomplex_val.append(nparray[i].imag)


def AppendObjectArrayToTensorProto(tensor_proto, np.ndarray nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.string_val.append(compat.as_bytes(nparray[i]))


def AppendBoolArrayToTensorProto(tensor_proto, nparray):
  cdef long i, n
  n = nparray.size
  for i in range(n):
    tensor_proto.bool_val.append(np.asscalar(nparray[i]))
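
Each helper appends the elements of a one-dimensional array to the matching repeated field of a TensorProto (note that the narrow and unsigned integer types all share int_val). A minimal usage sketch, assuming the extension compiled and the usual tensor_pb2 proto module:

  import numpy as np
  from tensorflow.core.framework import tensor_pb2
  from tensorflow.python.framework import fast_tensor_util

  proto = tensor_pb2.TensorProto()
  values = np.arange(4, dtype=np.float32)
  # Appends each element to the proto's repeated float_val field.
  fast_tensor_util.AppendFloat32ArrayToTensorProto(proto, values)
  assert list(proto.float_val) == [0.0, 1.0, 2.0, 3.0]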

tensorflow/python/framework/tensor_util.py
@@ -27,8 +27,7 @@ from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat

# TODO(opensource): Add support for pyx_library in the open-source build.
# For now, we use the slow versions that fast_tensor_util replaces.
# Fallback in case fast_tensor_util is not properly compiled.
# pylint: disable=g-import-not-at-top
try:
  from tensorflow.python.framework import fast_tensor_util
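
The except branch falls outside the diff context. A sketch of the shape this fallback presumably takes (the flag name is an assumption, not from the diff):

  try:
    from tensorflow.python.framework import fast_tensor_util
    _FAST_TENSOR_UTIL_AVAILABLE = True   # assumed flag name
  except ImportError:
    # Extension missing or miscompiled: keep using the slow pure-Python paths.
    _FAST_TENSOR_UTIL_AVAILABLE = False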

tensorflow/workspace.bzl
@@ -713,6 +713,17 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
      actual = "@cub_archive//:cub",
  )

  native.new_http_archive(
      name = "cython",
      sha256 = "6dcd30b5ceb887b2b965ee7ceb82ea3acb5f0642fe2206c7636b45acea4798e5",
      urls = [
          "http://mirror.bazel.build/github.com/cython/cython/archive/3732784c45cfb040a5b0936951d196f83a12ea17.tar.gz",
          "https://github.com/cython/cython/archive/3732784c45cfb040a5b0936951d196f83a12ea17.tar.gz",
      ],
      strip_prefix = "cython-3732784c45cfb040a5b0936951d196f83a12ea17",
      build_file = str(Label("//third_party:cython.BUILD")),
  )

  native.http_archive(
      name = "bazel_toolchains",
      urls = [
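
The Cython archive is pinned to a single upstream commit and verified by sha256. A small illustrative script (not part of the build) for recomputing the digest when bumping the pin:

  import hashlib
  import urllib.request

  URL = ("https://github.com/cython/cython/archive/"
         "3732784c45cfb040a5b0936951d196f83a12ea17.tar.gz")
  data = urllib.request.urlopen(URL).read()
  # Should print the sha256 recorded in the new_http_archive rule above.
  print(hashlib.sha256(data).hexdigest())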

third_party/cython.BUILD (new file, vendored, 28 lines)
@@ -0,0 +1,28 @@
# Modified version of @cython//:BUILD.bazel

py_library(
    name = "cython_lib",
    srcs = glob(
        ["Cython/**/*.py"],
        exclude = [
            "**/Tests/*.py",
        ],
    ) + ["cython.py"],
    data = glob([
        "Cython/**/*.pyx",
        "Cython/Utility/*.*",
        "Cython/Includes/**/*.pxd",
    ]),
    srcs_version = "PY2AND3",
    visibility = ["//visibility:public"],
)

# May not be named "cython", since that conflicts with Cython/ on OSX
py_binary(
    name = "cython_binary",
    srcs = ["cython.py"],
    main = "cython.py",
    srcs_version = "PY2AND3",
    visibility = ["//visibility:public"],
    deps = ["cython_lib"],
)