[build] modernize build-backend: setuptools.build_meta:__legacy__ -> setuptools.build_meta (#155998)

Change `build-system.build-backend`: `setuptools.build_meta:__legacy__` -> `setuptools.build_meta`. Also, move static package info from `setup.py` to `pyproject.toml`.

Now the repo can be installed from source via the `pip` command instead of `python setup.py develop`:

```bash
python -m pip install --verbose --editable .

python -m pip install --verbose --no-build-isolation --editable .
```

In addition, the SDist is also buildable:

```bash
python -m build --sdist
python -m pip install dist/torch-*.tar.gz  # build from source using SDist
```

Note that we should build the SDist from a fresh git clone if we intend to upload the output to PyPI, because all files under `third_party` are included in the SDist. The SDist file will be huge if the git submodules are initialized.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/155998
Approved by: https://github.com/ezyang, https://github.com/cyyever, https://github.com/atalman
ghstack dependencies: #157557
This commit is contained in:
Xuehai Pan
2025-07-04 21:07:57 +08:00
committed by PyTorch MergeBot
parent 9968edd002
commit 524e827095
5 changed files with 182 additions and 150 deletions

View File

@ -436,14 +436,21 @@ test_inductor_aoti() {
python3 tools/amd_build/build_amd.py
fi
if [[ "$BUILD_ENVIRONMENT" == *sm86* ]]; then
BUILD_AOT_INDUCTOR_TEST=1 TORCH_CUDA_ARCH_LIST=8.6 USE_FLASH_ATTENTION=OFF python setup.py develop
BUILD_COMMAND=(TORCH_CUDA_ARCH_LIST=8.6 USE_FLASH_ATTENTION=OFF python setup.py develop)
# TODO: Replace me completely, as one should not use conda libstdc++, nor need special path to TORCH_LIB
LD_LIBRARY_PATH=/opt/conda/envs/py_3.10/lib/:${TORCH_LIB_DIR}:$LD_LIBRARY_PATH
CPP_TESTS_DIR="${BUILD_BIN_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference -dist=loadfile
TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="/opt/conda/envs/py_3.10/lib:${TORCH_LIB_DIR}:${LD_LIBRARY_PATH}")
else
BUILD_AOT_INDUCTOR_TEST=1 python setup.py develop
CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference -dist=loadfile
BUILD_COMMAND=(python setup.py develop)
TEST_ENVS=(CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}")
fi
# aoti cmake custom command requires `torch` to be installed
# initialize the cmake build cache and install torch
/usr/bin/env "${BUILD_COMMAND[@]}"
# rebuild with the build cache with `BUILD_AOT_INDUCTOR_TEST` enabled
/usr/bin/env CMAKE_FRESH=1 BUILD_AOT_INDUCTOR_TEST=1 "${BUILD_COMMAND[@]}"
/usr/bin/env "${TEST_ENVS[@]}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference -dist=loadfile
}
test_inductor_cpp_wrapper_shard() {

View File

@ -1,31 +1,50 @@
include MANIFEST.in
# Reference: https://setuptools.pypa.io/en/latest/userguide/miscellaneous.html
# Include source files in SDist
include CMakeLists.txt
include CITATION.cff
include LICENSE
include NOTICE
include .gitmodules
include build_variables.bzl
include mypy.ini
include requirements.txt
include ufunc_defs.bzl
include *.bzl *.bazel .bazel* BUILD *.BUILD BUILD.* WORKSPACE
include BUCK BUCK.*
include requirements*.txt
include version.txt
recursive-include android *.*
recursive-include aten *.*
recursive-include binaries *.*
recursive-include c10 *.*
recursive-include caffe2 *.*
recursive-include cmake *.*
recursive-include torch *.*
recursive-include tools *.*
recursive-include test *.*
recursive-include docs *.*
recursive-include ios *.*
recursive-include third_party *
recursive-include test *.*
recursive-include benchmarks *.*
recursive-include scripts *.*
recursive-include mypy_plugins *.*
recursive-include modules *.*
recursive-include functorch *.*
include [Mm]akefile *.[Mm]akefile [Mm]akefile.*
include [Dd]ockerfile *.[Dd]ockerfile [Dd]ockerfile.* .dockerignore
graft android
graft aten
graft binaries
graft c10
graft caffe2
graft cmake
graft functorch
graft third_party
graft tools
graft torch
graft torchgen
# FIXME: torch-xla build during codegen will fail if include this file in wheel
exclude torchgen/BUILD.bazel
# Misc files and directories in SDist
include *.md
include CITATION.cff
include LICENSE NOTICE
include mypy*.ini
graft benchmarks
graft docs
graft mypy_plugins
graft scripts
# Misc files needed for custom setuptools command
include .gitignore
include .gitmodules
# Include test suites in SDist
graft test
include pytest.ini
include .coveragerc
# Prune generated/compiled files
prune torchgen/packaged
prune */__pycache__
global-exclude *.o *.so *.dylib *.a .git *.pyc *.swp
global-exclude *.o *.obj *.so *.a *.dylib *.pxd *.dll *.lib *.py[cod]
prune */.git
global-exclude .git *~ *.swp

View File

@ -1,37 +1,16 @@
[project]
name = "torch"
requires-python = ">=3.9"
license = {text = "BSD-3-Clause"}
dynamic = [
"authors",
"classifiers",
"entry-points",
"dependencies",
"description",
"keywords",
"optional-dependencies",
"readme",
"scripts",
"version",
]
[project.urls]
Homepage = "https://pytorch.org/"
Documentation = "https://pytorch.org/docs/"
Source = "https://github.com/pytorch/pytorch"
Forum = "https://discuss.pytorch.org/"
# Package ######################################################################
[build-system]
requires = [
# After 75.8.2 dropped dep disttools API. Please fix
# API temporarily restored and shim used. Please fix
# Setuptools will drop support for setup.py past 80
# min version for recursive glob package data support
# 62.3.0: min version for recursive glob package data support
# 77.0.0: min version for SPDX expression support for project.license
"setuptools>=62.3.0,<80.0",
"wheel",
"astunparse",
"cmake",
"cmake>=3.27",
"ninja",
"numpy",
"packaging",
@ -39,9 +18,59 @@ requires = [
"requests",
"typing-extensions>=4.10.0",
]
# Use legacy backend to import local packages in setup.py
build-backend = "setuptools.build_meta:__legacy__"
build-backend = "setuptools.build_meta"
[project]
name = "torch"
description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
readme = "README.md"
requires-python = ">=3.9,<3.14"
# TODO: change to `license = "BSD-3-Clause"` and enable PEP 639 after pinning setuptools>=77
# FIXME: As of 2025.06.20, it is hard to ensure the minimum version of setuptools in our CI environment.
# TOML-table-based license deprecated in setuptools>=77, and the deprecation warning will be changed
# to an error on 2026.02.18. See also: https://github.com/pypa/setuptools/issues/4903
license = { text = "BSD-3-Clause" }
authors = [{ name = "PyTorch Team", email = "packages@pytorch.org" }]
keywords = ["pytorch", "machine learning"]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: C++",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
]
dynamic = [
"entry-points",
"dependencies",
"scripts",
"version",
]
[project.urls]
Homepage = "https://pytorch.org"
Repository = "https://github.com/pytorch/pytorch"
Documentation = "https://pytorch.org/docs"
"Issue Tracker" = "https://github.com/pytorch/pytorch/issues"
Forum = "https://discuss.pytorch.org"
[project.optional-dependencies]
optree = ["optree>=0.13.0"]
opt-einsum = ["opt-einsum>=3.3"]
pyyaml = ["pyyaml"]
# Linter tools #################################################################
[tool.black]
line-length = 88
@ -60,12 +89,10 @@ multi_line_output = 3
include_trailing_comma = true
combine_as_imports = true
[tool.usort.known]
first_party = ["caffe2", "torch", "torchgen", "functorch", "test"]
standard_library = ["typing_extensions"]
[tool.ruff]
line-length = 88
src = ["caffe2", "torch", "torchgen", "functorch", "test"]

View File

@ -1,6 +1,7 @@
# Python dependencies required for development
astunparse
cmake
build[uv] # for building sdist and wheel
cmake>=3.27
expecttest>=0.3.0
filelock
fsspec

152
setup.py
View File

@ -244,6 +244,7 @@ if sys.platform == "win32" and sys.maxsize.bit_length() == 31:
import platform
# Also update `project.requires-python` in pyproject.toml when changing this
python_min_version = (3, 9, 0)
python_min_version_str = ".".join(map(str, python_min_version))
if sys.version_info < python_min_version:
@ -272,6 +273,28 @@ import setuptools.command.sdist
import setuptools.errors
from setuptools import Command, Extension, find_packages, setup
from setuptools.dist import Distribution
CWD = Path(__file__).absolute().parent
# Add the current directory to the Python path so that we can import `tools`.
# This is required when running this script with a PEP-517-enabled build backend.
#
# From the PEP-517 documentation: https://peps.python.org/pep-0517
#
# > When importing the module path, we do *not* look in the directory containing
# > the source tree, unless that would be on `sys.path` anyway (e.g. because it
# > is specified in `PYTHONPATH`).
#
sys.path.insert(0, str(CWD)) # this only affects the current process
# Add the current directory to PYTHONPATH so that we can import `tools` in subprocesses
os.environ["PYTHONPATH"] = os.pathsep.join(
[
str(CWD),
os.getenv("PYTHONPATH", ""),
]
).rstrip(os.pathsep)
from tools.build_pytorch_libs import build_pytorch
from tools.generate_torch_version import get_torch_version
from tools.setup_helpers.cmake import CMake, CMakeValue
@ -364,8 +387,8 @@ RUN_BUILD_DEPS = True
# see if the user passed a quiet flag to setup.py arguments and respect
# that in our parts of the build
EMIT_BUILD_WARNING = False
RERUN_CMAKE = str2bool(os.getenv("CMAKE_FRESH"))
CMAKE_ONLY = str2bool(os.getenv("CMAKE_ONLY"))
RERUN_CMAKE = str2bool(os.environ.pop("CMAKE_FRESH", None))
CMAKE_ONLY = str2bool(os.environ.pop("CMAKE_ONLY", None))
filtered_args = []
for i, arg in enumerate(sys.argv):
if arg == "--cmake":
@ -407,7 +430,6 @@ else:
setuptools.distutils.log.warn = report # type: ignore[attr-defined]
# Constant known variables used throughout this file
CWD = Path(__file__).absolute().parent
TORCH_DIR = CWD / "torch"
TORCH_LIB_DIR = TORCH_DIR / "lib"
THIRD_PARTY_DIR = CWD / "third_party"
@ -1084,14 +1106,12 @@ def configure_extension_build() -> tuple[
# pypi cuda package that requires installation of cuda runtime, cudnn and cublas
# should be included in all wheels uploaded to pypi
pytorch_extra_install_requirements = os.getenv(
"PYTORCH_EXTRA_INSTALL_REQUIREMENTS", ""
)
if pytorch_extra_install_requirements:
report(
f"pytorch_extra_install_requirements: {pytorch_extra_install_requirements}"
pytorch_extra_install_requires = os.getenv("PYTORCH_EXTRA_INSTALL_REQUIREMENTS")
if pytorch_extra_install_requires:
report(f"pytorch_extra_install_requirements: {pytorch_extra_install_requires}")
extra_install_requires.extend(
map(str.strip, pytorch_extra_install_requires.split("|"))
)
extra_install_requires += pytorch_extra_install_requirements.split("|")
# Cross-compile for M1
if IS_DARWIN:
@ -1127,10 +1147,15 @@ def configure_extension_build() -> tuple[
################################################################################
ext_modules: list[Extension] = []
# packages that we want to install into site-packages and include them in wheels
includes = ["torch", "torch.*", "torchgen", "torchgen.*"]
# exclude folders that they look like Python packages but are not wanted in wheels
excludes = ["tools", "tools.*", "caffe2", "caffe2.*"]
if not cmake_cache_vars["BUILD_FUNCTORCH"]:
if cmake_cache_vars["BUILD_FUNCTORCH"]:
includes.extend(["functorch", "functorch.*"])
else:
excludes.extend(["functorch", "functorch.*"])
packages = find_packages(exclude=excludes)
packages = find_packages(include=includes, exclude=excludes)
C = Extension(
"torch._C",
libraries=main_libraries,
@ -1208,6 +1233,7 @@ def main() -> None:
"Conflict: 'BUILD_LIBTORCH_WHL' and 'BUILD_PYTHON_ONLY' can't both be 1. "
"Set one to 0 and rerun."
)
install_requires = [
"filelock",
"typing-extensions>=4.10.0",
@ -1217,9 +1243,8 @@ def main() -> None:
"jinja2",
"fsspec",
]
if BUILD_PYTHON_ONLY:
install_requires.append(f"{LIBTORCH_PKG_NAME}=={get_torch_version()}")
install_requires += [f"{LIBTORCH_PKG_NAME}=={TORCH_VERSION}"]
if str2bool(os.getenv("USE_PRIORITIZED_TEXT_FOR_LD")):
gen_linker_script(
@ -1249,7 +1274,7 @@ def main() -> None:
try:
dist.parse_command_line()
except setuptools.errors.BaseError as e:
print(e)
print(e, file=sys.stderr)
sys.exit(1)
mirror_files_into_torchgen()
@ -1265,16 +1290,6 @@ def main() -> None:
) = configure_extension_build()
install_requires += extra_install_requires
extras_require = {
"optree": ["optree>=0.13.0"],
"opt-einsum": ["opt-einsum>=3.3"],
"pyyaml": ["pyyaml"],
}
# Read in README.md for our long_description
long_description = (CWD / "README.md").read_text(encoding="utf-8")
version_range_max = max(sys.version_info[1], 13) + 1
torch_package_data = [
"py.typed",
"bin/*",
@ -1317,22 +1332,18 @@ def main() -> None:
]
if not BUILD_LIBTORCH_WHL:
torch_package_data.extend(
[
"lib/libtorch_python.so",
"lib/libtorch_python.dylib",
"lib/libtorch_python.dll",
]
)
torch_package_data += [
"lib/libtorch_python.so",
"lib/libtorch_python.dylib",
"lib/libtorch_python.dll",
]
if not BUILD_PYTHON_ONLY:
torch_package_data.extend(
[
"lib/*.so*",
"lib/*.dylib*",
"lib/*.dll",
"lib/*.lib",
]
)
torch_package_data += [
"lib/*.so*",
"lib/*.dylib*",
"lib/*.dll",
"lib/*.lib",
]
# XXX: Why not use wildcards ["lib/aotriton.images/*", "lib/aotriton.images/**/*"] here?
aotriton_image_path = TORCH_DIR / "lib" / "aotriton.images"
aks2_files = [
@ -1342,19 +1353,15 @@ def main() -> None:
]
torch_package_data += aks2_files
if get_cmake_cache_vars()["USE_TENSORPIPE"]:
torch_package_data.extend(
[
"include/tensorpipe/*.h",
"include/tensorpipe/**/*.h",
]
)
torch_package_data += [
"include/tensorpipe/*.h",
"include/tensorpipe/**/*.h",
]
if get_cmake_cache_vars()["USE_KINETO"]:
torch_package_data.extend(
[
"include/kineto/*.h",
"include/kineto/**/*.h",
]
)
torch_package_data += [
"include/kineto/*.h",
"include/kineto/**/*.h",
]
torchgen_package_data = [
"packaged/*",
"packaged/**/*",
@ -1362,9 +1369,11 @@ def main() -> None:
package_data = {
"torch": torch_package_data,
}
exclude_package_data = {}
if not BUILD_LIBTORCH_WHL:
package_data["torchgen"] = torchgen_package_data
exclude_package_data["torchgen"] = ["*.py[co]"]
else:
# no extensions in BUILD_LIBTORCH_WHL mode
ext_modules = []
@ -1372,47 +1381,16 @@ def main() -> None:
setup(
name=TORCH_PACKAGE_NAME,
version=TORCH_VERSION,
description=(
"Tensors and Dynamic neural networks in Python with strong GPU acceleration"
),
long_description=long_description,
long_description_content_type="text/markdown",
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=packages,
entry_points=entry_points,
install_requires=install_requires,
extras_require=extras_require,
package_data=package_data,
# TODO fix later Manifest.IN file was previously ignored
include_package_data=False, # defaults to True with pyproject.toml file
url="https://pytorch.org/",
download_url="https://github.com/pytorch/pytorch/tags",
author="PyTorch Team",
author_email="packages@pytorch.org",
python_requires=f">={python_min_version_str}",
# PyPI package information.
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: C++",
"Programming Language :: Python :: 3",
]
+ [
f"Programming Language :: Python :: 3.{i}"
for i in range(python_min_version[1], version_range_max)
],
license="BSD-3-Clause",
keywords="pytorch, machine learning",
exclude_package_data=exclude_package_data,
# Disable automatic inclusion of data files because we want to
# explicitly control with `package_data` above.
include_package_data=False,
)
if EMIT_BUILD_WARNING:
print_box(build_update_message)