[bazel] add python targets (#101003)

This PR adds Bazel Python targets, so that the Bazel build can be used from Python, e.g. via `import torch`.

Notable changes:
- Add the Python targets.
- Add the `version.py.tpl` generation.
- In order to achieve `USE_GLOBAL_DEPS = False` just for the Bazel build, employ a monkey-patch hack in the aforementioned `version.py.tpl` (a sketch of the mechanism follows this list).
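
A minimal, self-contained sketch of that monkey-patch mechanism (`fakepkg` is a hypothetical stand-in; the real `torch/__init__.py` is more involved):

```python
import sys
import types

# Stand-in for the partially initialized `torch` package: __init__.py binds
# the default early, before it imports the version module.
pkg = types.ModuleType("fakepkg")
pkg.USE_GLOBAL_DEPS = True
sys.modules["fakepkg"] = pkg

# Stand-in for `from torch.version import ...` running inside __init__.py:
# the generated version.py does `import torch; torch.USE_GLOBAL_DEPS = False`,
# which rebinds the flag on the partially initialized package.
import fakepkg
fakepkg.USE_GLOBAL_DEPS = False

# Code that runs later in __init__.py observes the patched value and can
# skip loading libtorch_global_deps.so.
assert pkg.USE_GLOBAL_DEPS is False
```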
Pull Request resolved: https://github.com/pytorch/pytorch/pull/101003
Approved by: https://github.com/huydhn
Author: Sergei Vorobev
Date: 2023-05-12 19:43:56 +00:00
Committed by: PyTorch MergeBot
Parent: 4434b9af6a
Commit: 630593d3cc
7 changed files with 104 additions and 6 deletions

.bazelrc

@@ -96,6 +96,9 @@ build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-parameter
 # likely want to have this disabled for the most part.
 build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-missing-field-initializers
 build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-function
 build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-variable
+build --per_file_copt='//:aten/src/ATen/RegisterCompositeExplicitAutograd\.cpp$'@-Wno-error=unused-function
+build --per_file_copt='//:aten/src/ATen/RegisterCompositeImplicitAutograd\.cpp$'@-Wno-error=unused-function
+build --per_file_copt='//:aten/src/ATen/RegisterMkldnnCPU\.cpp$'@-Wno-error=unused-function

.ci/pytorch/build.sh

@@ -197,7 +197,7 @@ if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then
   if [[ "$CUDA_VERSION" == "cpu" ]]; then
     # Build torch, the Python module, and tests for CPU-only
-    tools/bazel build --config=no-tty "${BAZEL_MEM_LIMIT}" "${BAZEL_CPU_LIMIT}" --config=cpu-only :torch :_C.so :all_tests
+    tools/bazel build --config=no-tty "${BAZEL_MEM_LIMIT}" "${BAZEL_CPU_LIMIT}" --config=cpu-only :torch :torch/_C.so :all_tests
   else
     tools/bazel build --config=no-tty "${BAZEL_MEM_LIMIT}" "${BAZEL_CPU_LIMIT}" //...
   fi

.ci/pytorch/test.sh

@@ -889,6 +889,7 @@ test_bazel() {
     //:torch_dist_autograd_test \
     //:torch_include_test \
     //:transformer_test \
+    //:test_bazel \
     //c10/cuda/test:test \
     //c10/test:core_tests \
     //c10/test:typeid_test \

BUILD.bazel

@@ -1,14 +1,15 @@
 load("@bazel_skylib//lib:paths.bzl", "paths")
 load("@pybind11_bazel//:build_defs.bzl", "pybind_extension")
 load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
-load("@pytorch//third_party:substitution.bzl", "header_template_rule")
+load("@pytorch//third_party:substitution.bzl", "header_template_rule", "template_rule")
+load("@pytorch//:tools/bazel.bzl", "rules")
 load("@pytorch//tools/rules:cu.bzl", "cu_library")
 load("@pytorch//tools/config:defs.bzl", "if_cuda")
 load("@pytorch//:aten.bzl", "generate_aten", "intern_build_aten_ops")
 load(":build.bzl", "GENERATED_AUTOGRAD_CPP", "GENERATED_AUTOGRAD_PYTHON", "define_targets")
-load(":build_variables.bzl", "jit_core_sources", "lazy_tensor_ts_sources", "libtorch_core_sources", "libtorch_cuda_sources", "libtorch_distributed_sources", "libtorch_extra_sources", "libtorch_nvfuser_generated_headers", "libtorch_nvfuser_runtime_sources", "libtorch_python_core_sources", "torch_cpp_srcs")
+load(":build_variables.bzl", "jit_core_sources", "lazy_tensor_ts_sources", "libtorch_core_sources", "libtorch_cuda_sources", "libtorch_distributed_sources", "libtorch_extra_sources", "libtorch_nvfuser_generated_headers", "libtorch_nvfuser_runtime_sources", "libtorch_python_core_sources", "torch_cpp_srcs", "libtorch_python_cuda_sources", "libtorch_python_distributed_sources")
 load(":ufunc_defs.bzl", "aten_ufunc_generated_cpu_kernel_sources", "aten_ufunc_generated_cpu_sources", "aten_ufunc_generated_cuda_sources")
-load("//:tools/bazel.bzl", "rules")

 define_targets(rules = rules)
@@ -1671,19 +1672,72 @@ cc_library(
 cc_library(
     name = "torch_python",
-    srcs = libtorch_python_core_sources + GENERATED_AUTOGRAD_PYTHON,
+    srcs = libtorch_python_core_sources
+        + if_cuda(libtorch_python_cuda_sources)
+        + if_cuda(libtorch_python_distributed_sources)
+        + GENERATED_AUTOGRAD_PYTHON,
     hdrs = glob([
         "torch/csrc/generic/*.cpp",
     ]),
     copts = COMMON_COPTS + if_cuda(["-DUSE_CUDA=1"]),
     deps = [
+        ":shm",
         ":torch",
-        ":shm",
         "@pybind11",
     ],
 )

 pybind_extension(
-    name = "_C",
+    name = "torch/_C",
     srcs = ["torch/csrc/stub.c"],
     deps = [
         ":torch_python",
         ":aten_nvrtc",
     ],
 )

+cc_binary(
+    name = "torch/bin/torch_shm_manager",
+    srcs = [
+        "torch/lib/libshm/manager.cpp",
+    ],
+    deps = [
+        ":shm",
+    ],
+    linkstatic = False,
+)
+
+template_rule(
+    name = "gen_version_py",
+    src = ":torch/version.py.tpl",
+    out = "torch/version.py",
+    substitutions = if_cuda({
+        # Set default to 11.2. Otherwise Torchvision complains about incompatibility.
+        "{{CUDA_VERSION}}": "11.2",
+        "{{VERSION}}": "2.0.0",
+    }, {
+        "{{CUDA_VERSION}}": "None",
+        "{{VERSION}}": "2.0.0",
+    }),
+)
+
+rules.py_library(
+    name = "pytorch_py",
+    visibility = ["//visibility:public"],
+    srcs = glob(["torch/**/*.py"], exclude = ["torch/version.py"]) + [":torch/version.py"],
+    deps = [
+        rules.requirement("future"),
+        rules.requirement("numpy"),
+        rules.requirement("pyyaml"),
+        rules.requirement("requests"),
+        rules.requirement("setuptools"),
+        rules.requirement("six"),
+        rules.requirement("typing_extensions"),
+        "//torchgen",
+    ],
+    data = [
+        ":torch/_C.so",
+        ":torch/bin/torch_shm_manager",
+    ],
+)
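
Once a Python target depends on `pytorch_py`, the generated `torch/version.py` surfaces through the usual attributes. A usage sketch (illustrative, not part of the diff):

```python
import torch

# Values come from the template_rule substitutions above: "2.0.0" for the
# version, and "11.2" (CUDA config) or None (cpu-only config) for CUDA.
print(torch.__version__)   # 2.0.0
print(torch.version.cuda)  # '11.2' or None
```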
@@ -1845,6 +1899,14 @@ cc_test(
     ],
 )

+# python api tests
+py_test(
+    name = "test_bazel",
+    srcs = ["test/test_bazel.py"],
+    deps = [":pytorch_py"],
+)
+
 # all tests
 test_suite(
     name = "all_tests",

WORKSPACE

@@ -54,6 +54,13 @@ http_archive(
     urls = [
         "https://github.com/google/glog/archive/v0.4.0.tar.gz",
     ],
+    build_file_content = """
+licenses(['notice'])
+
+load(':bazel/glog.bzl', 'glog_library')
+# TODO: figure out why enabling gflags leads to SIGSEGV on the logging init
+glog_library(with_gflags=0)
+""",
 )

 http_archive(

test/test_bazel.py (new file, 15 lines)

@@ -0,0 +1,15 @@
+# Owner(s): ["module: bazel"]
+
+"""
+This test module contains minimalistic "smoke tests" for the Bazel build.
+
+Currently it doesn't use any testing framework (e.g. pytest).
+TODO: integrate this into the existing pytorch testing framework.
+"""
+
+import torch
+
+
+def test_sum():
+    assert torch.eq(torch.tensor([[1, 2, 3]]) + torch.tensor([[4, 5, 6]]), torch.tensor([[5, 7, 9]])).all()
+
+
+test_sum()

torch/version.py.tpl (new file, 10 lines)

@@ -0,0 +1,10 @@
+__version__ = '{{VERSION}}'
+debug = False
+cuda = '{{CUDA_VERSION}}'
+hip = None
+
+# This is a gross monkey-patch hack that depends on the order of imports
+# in torch/__init__.py
+# TODO: find a more elegant solution to set `USE_GLOBAL_DEPS` for the bazel build
+import torch
+torch.USE_GLOBAL_DEPS = False
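
For reference, under the CUDA configuration the `gen_version_py` rule in BUILD.bazel would render this template roughly as follows (illustrative; derived from the substitutions shown above):

```python
__version__ = '2.0.0'
debug = False
cuda = '11.2'
hip = None

# This is a gross monkey-patch hack that depends on the order of imports
# in torch/__init__.py
# TODO: find a more elegant solution to set `USE_GLOBAL_DEPS` for the bazel build
import torch
torch.USE_GLOBAL_DEPS = False
```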