[Test] create shared targets for xplat aten (#78345)
Differential Revision: D36694963

Pull Request resolved: https://github.com/pytorch/pytorch/pull/78345
Approved by: https://github.com/kit1980
Committed by: PyTorch MergeBot
Parent: 50f2af84da
Commit: 430955b3a8
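The change below replaces target definitions that BUCK.oss previously spelled out inline with a single call to define_buck_targets(), loaded from buckbuild.bzl, so the open-source build and the internal xplat build can share one set of target definitions. The sketch below only illustrates that pattern: the real buckbuild.bzl ships with this commit but its body is not part of this diff, and the stub target shown (a filegroup copied from the block being removed below) is purely illustrative.

# buckbuild.bzl: illustrative sketch, not the file's actual contents.
def define_buck_targets():
    # Shared targets are defined once here instead of being duplicated
    # between BUCK.oss and the internal xplat build files.
    # (Assumes Buck's Starlark `native` module for rule access in a .bzl file.)
    native.filegroup(
        name = "aten_src_path",
        srcs = [
            "aten/src/ATen/native/native_functions.yaml",
            "aten/src/ATen/native/tags.yaml",
        ] + native.glob(["aten/src/ATen/templates/*"]),
        visibility = ["PUBLIC"],
    )

# BUCK.oss then only needs to load the macro and call it, as the diff below does:
# load(":buckbuild.bzl", "define_buck_targets")
# define_buck_targets()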
BUCK.oss (204 changed lines)
@@ -23,6 +23,11 @@ load(
    "aten_ufunc_generated_all_cpu_sources",
    "TEMPLATE_SOURCE_LIST",
)
load(":buckbuild.bzl",
    "define_buck_targets",
)

define_buck_targets()

cxx_library(
    name = "pthreadpool",
@@ -85,67 +90,23 @@ cxx_library(
    visibility = ['PUBLIC'],
)

cxx_library(
    name = "aten_header",
    header_namespace = "",
    exported_headers = subdir_glob([
        # ATen Core
        ("aten/src", "ATen/core/**/*.h"),
        ("aten/src", "ATen/ops/*.h"),
        # ATen Base
        ("aten/src", "ATen/*.h"),
        ("aten/src", "ATen/cpu/**/*.h"),
        ("aten/src", "ATen/detail/*.h"),
        ("aten/src", "ATen/quantized/*.h"),
        ("aten/src", "ATen/vulkan/*.h"),
        ("aten/src", "ATen/metal/*.h"),
        ("aten/src", "ATen/mps/*.h"),
        ("aten/src", "ATen/nnapi/*.h"),
        # ATen Native
        ("aten/src", "ATen/native/*.h"),
        ("aten/src", "ATen/native/ao_sparse/quantized/cpu/*.h"),
        ("aten/src", "ATen/native/cpu/**/*.h"),
        ("aten/src", "ATen/native/sparse/*.h"),
        ("aten/src", "ATen/native/mps/*.h"),
        ("aten/src", "ATen/native/nested/*.h"),
        ("aten/src", "ATen/native/quantized/*.h"),
        ("aten/src", "ATen/native/quantized/cpu/*.h"),
        ("aten/src", "ATen/native/transformers/*.h"),
        ("aten/src", "ATen/native/ufunc/*.h"),
        ("aten/src", "ATen/native/utils/*.h"),
        ("aten/src", "ATen/native/vulkan/ops/*.h"),
        ("aten/src", "ATen/native/xnnpack/*.h"),
        # Remove the following after modifying codegen for mobile.
        ("aten/src", "ATen/mkl/*.h"),
        ("aten/src", "ATen/native/mkl/*.h"),
        ("aten/src", "ATen/native/mkldnn/*.h"),
    ], exclude = ["aten/src/ATen/Config.h"]),
    visibility = ["PUBLIC"],
)

cxx_library(
    name = "jit_core_headers",
    header_namespace = "",
    exported_headers = subdir_glob([("", x) for x in jit_core_headers]),
)

cxx_library(
    name = "generated_aten_config_header",
    header_namespace = "ATen",
    exported_headers = {
        "Config.h": ":generate_aten_config[Config.h]",
    },
)

cxx_library(
    name = "torch_mobile_headers",
    header_namespace = "",
    exported_headers = subdir_glob(
        [
            ("", "torch/csrc/jit/mobile/*.h"),
        ],
    ),
    visibility = ["PUBLIC"],
build_aten_cpu(
    name = "aten_cpu",
    srcs = jit_core_sources +
        aten_cpu_source_list + [
            # Generated
            ":gen_aten[Functions.cpp]",
            ":gen_aten[Operators_0.cpp]",
            ":gen_aten[Operators_1.cpp]",
            ":gen_aten[Operators_2.cpp]",
            ":gen_aten[Operators_3.cpp]",
            ":gen_aten[Operators_4.cpp]",
            ":gen_aten[core/ATenOpList.cpp]",
            ":gen_aten[core/TensorMethods.cpp]",
        ] + [
            # Needed by ATen/native/EmbeddingBag.cpp
            "caffe2/perfkernels/embedding_lookup_idx.cc",
        ],
)

fb_xplat_genrule(
@@ -169,7 +130,7 @@ fb_xplat_genrule(
        "-e 's/@AT_BUILD_WITH_LAPACK@/USE_LAPACK_FBXPLAT/g'",
        "-e 's/@AT_BLAS_F2C@/AT_BLAS_F2C_FBXPLAT/g'",
        "-e 's/@AT_BLAS_USE_CBLAS_DOT@/AT_BLAS_USE_CBLAS_DOT_FBXPLAT/g'",
        "aten/src/ATen/Config.h.in > $OUT/Config.h"
        "aten/src/ATen/Config.h.in > $OUT/Config.h",
    ]),
    outs = {
        "Config.h": ["Config.h"],
@@ -206,56 +167,8 @@ cxx_library(
    header_namespace = "ATen",
    exported_headers = ATEN_EXPORTED_HEADERS,
)

filegroup(
    name = "aten_src_path",
    srcs = [
        "aten/src/ATen/native/native_functions.yaml",
        "aten/src/ATen/native/tags.yaml",
    ] + glob(["aten/src/ATen/templates/*"]),
    visibility = [
        "PUBLIC",
    ],
)

build_aten_cpu(
    name = "aten_cpu",
    srcs = jit_core_sources +
        aten_cpu_source_list + [
            # Generated
            ":gen_aten[Functions.cpp]",
            ":gen_aten[Operators_0.cpp]",
            ":gen_aten[Operators_1.cpp]",
            ":gen_aten[Operators_2.cpp]",
            ":gen_aten[Operators_3.cpp]",
            ":gen_aten[Operators_4.cpp]",
            ":gen_aten[core/ATenOpList.cpp]",
            ":gen_aten[core/TensorMethods.cpp]",
        ] + [
            # Needed by ATen/native/EmbeddingBag.cpp
            "caffe2/perfkernels/embedding_lookup_idx.cc",
        ],
)

gen_aten_libtorch_files(name = "gen_aten_libtorch")


GENERATED_AUTOGRAD_H = {
    "Functions.h": ":gen_aten_libtorch[autograd/generated/Functions.h]",
    "VariableType.h": ":gen_aten_libtorch[autograd/generated/VariableType.h]",
    "variable_factories.h": ":gen_aten_libtorch[autograd/generated/variable_factories.h]",

    # Don't build python bindings on mobile.
    #"python_functions.h",
}

cxx_library(
    name = "generated-autograd-headers",
    header_namespace = "torch/csrc/autograd/generated",
    exported_headers = GENERATED_AUTOGRAD_H,
    visibility = ["PUBLIC"],
)

cxx_library(
    name = "torch_mobile_observer",
    srcs = [
@@ -276,63 +189,13 @@ cxx_library(
    ],
)

fb_xplat_genrule(
    name = "generate-version-header",
    srcs = [
        "torch/csrc/api/include/torch/version.h.in",
        "version.txt",
    ],
    cmd = "$(exe //tools/setup_helpers:gen-version-header) " + " ".join([
        "--template-path",
        "torch/csrc/api/include/torch/version.h.in",
        "--version-path",
        "version.txt",
        "--output-path",
        "$OUT/version.h",
python_library(
    name = "aten_code_template",
    srcs = subdir_glob([
        ("aten", "src/ATen/code_template.py"),
    ]),
    outs = {
        "version.h": ["version.h"],
    },
    default_outs = ["."],
)

cxx_library(
    name = "generated-version-header",
    header_namespace = "torch",
    exported_headers = {
        "version.h": ":generate-version-header[version.h]",
    },
)

cxx_library(
    name = "torch_headers",
    header_namespace = "",
    exported_headers = subdir_glob(
        [
            ("torch/csrc/api/include", "torch/**/*.h"),
            ("", "torch/csrc/**/*.h"),
            ("", "torch/csrc/generic/*.cpp"),
            ("", "torch/script.h"),
            ("", "torch/library.h"),
            ("", "torch/custom_class.h"),
            ("", "torch/custom_class_detail.h"),
            # Add again due to namespace difference from aten_header.
            ("", "aten/src/ATen/*.h"),
            ("", "aten/src/ATen/quantized/*.h"),
        ],
        exclude = [
            # Don't need on mobile.
            "torch/csrc/Exceptions.h",
            "torch/csrc/python_headers.h",
            "torch/csrc/utils/auto_gil.h",
            "torch/csrc/jit/serialization/mobile_bytecode_generated.h",
            "torch/csrc/api/include/torch/version.h",
        ],
    ),
    base_module = "",
    visibility = ["PUBLIC"],
    deps = [
        ":generated-version-header",
    ],
)


@@ -541,6 +404,17 @@ python_library(
    ],
)

python_binary(
    name = "gen_aten_bin",
    main_module = "torchgen.gen",
    visibility = [
        "PUBLIC",
    ],
    deps = [
        "//torchgen:torchgen",
    ],
)

python_binary(
    name = "gen_operators_yaml",
    main_module = "gen_operators_yaml",
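For context, here is a sketch of how a downstream Buck target might consume the shared targets once they are published with PUBLIC visibility. The consumer library and the //: target prefix (which assumes BUCK.oss sits at the repository root) are illustrative only and not part of this commit.

cxx_library(
    name = "aten_consumer",              # hypothetical consumer target
    srcs = ["aten_consumer.cpp"],
    deps = [
        "//:aten_cpu",                   # built by build_aten_cpu above
        "//:aten_header",                # exported ATen headers
        "//:generated-autograd-headers", # generated autograd headers
    ],
)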