Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
Summary: This PR simplifies `.github/scripts/generate_ci_workflows.py` by using the same strategy as https://github.com/pytorch/pytorch/issues/54344, representing workflows as plain data to avoid duplicating the definition of the `generate_workflow_file` function. This will make the script easier to maintain if/when that function is modified and/or more workflow types are added.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/58491

Test Plan: The Lint job in CI; specifically:
```
make generate-gha-workflows
mypy --config mypy-strict.ini
```

Reviewed By: malfet, seemethere

Differential Revision: D28511918

Pulled By: samestep

fbshipit-source-id: aaf415a954d938a29aee7c9367c9bc2b9f44bb01
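To make the maintainability claim above concrete, here is a hedged sketch of what adding a new workflow type would look like under the plain-data scheme: one more small constructor returning a dict plus one more (template, workflows) pair in the driver loop, with `generate_workflow_file` left untouched. The macOS names, runner label, and template filename are hypothetical, invented purely for illustration, and are not part of the script below.

```python
# Hypothetical addition, mirroring the existing PyTorchWindowsWorkflow helper.
from typing import Any, Dict

PyTorchWorkflow = Dict[str, Any]

def PyTorchMacOSWorkflow(*, build_environment: str,
                         on_pull_request: bool = False) -> PyTorchWorkflow:
    # Workflows are plain dicts; the template decides how the fields are used.
    return {
        "build_environment": build_environment,
        "test_runner_type": "macos.large",  # assumed runner label
        "on_pull_request": on_pull_request,
    }

MACOS_WORKFLOWS = [
    PyTorchMacOSWorkflow(build_environment="pytorch-macos-py3"),
]

# ...and in the script's __main__ block, one more pair alongside Linux/Windows:
# (jinja_env.get_template("macos_ci_workflow.yml.in"), MACOS_WORKFLOWS)
```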
187 lines · 7.4 KiB · Python · Executable File
#!/usr/bin/env python3

from pathlib import Path
from typing import Any, Dict

import jinja2

DOCKER_REGISTRY = "308535385114.dkr.ecr.us-east-1.amazonaws.com"

GITHUB_DIR = Path(__file__).parent.parent

# it would be nice to statically specify that build_environment must be
# present, but currently Python has no easy way to do that
# https://github.com/python/mypy/issues/4617
PyTorchWorkflow = Dict[str, Any]

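
# The two constructors below only assemble a workflow's plain-data description;
# CUDA build environments get GPU test runners, everything else gets CPU runners.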
def PyTorchWindowsWorkflow(
    *,
    build_environment: str,
    on_pull_request: bool = False
) -> PyTorchWorkflow:
    CPU_TEST_RUNNER = "windows.4xlarge"
    CUDA_TEST_RUNNER = "windows.8xlarge.nvidia.gpu"
    return {
        "build_environment": build_environment,
        "test_runner_type": (
            CUDA_TEST_RUNNER
            if "cuda" in build_environment
            else CPU_TEST_RUNNER
        ),
        "on_pull_request": on_pull_request,
    }


def PyTorchLinuxWorkflow(
    *,
    build_environment: str,
    docker_image_base: str,
    on_pull_request: bool = False,
    enable_doc_jobs: bool = False,
) -> PyTorchWorkflow:
    CPU_TEST_RUNNER = "linux.2xlarge"
    CUDA_TEST_RUNNER = "linux.8xlarge.nvidia.gpu"
    return {
        "build_environment": build_environment,
        "docker_image_base": docker_image_base,
        "test_runner_type": (
            CUDA_TEST_RUNNER
            if "cuda" in build_environment
            else CPU_TEST_RUNNER
        ),
        "on_pull_request": on_pull_request,
        "enable_doc_jobs": enable_doc_jobs,
    }

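
# Render a workflow's template with its data and write the result to
# .github/workflows/<build_environment>.yml, prefixed with a marker so the
# generated file is not edited by hand.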
def generate_workflow_file(
    *,
    workflow: PyTorchWorkflow,
    workflow_template: jinja2.Template,
) -> Path:
    output_file_path = GITHUB_DIR / f"workflows/{workflow['build_environment']}.yml"
    with open(output_file_path, "w") as output_file:
        output_file.writelines(["# @generated DO NOT EDIT MANUALLY\n"])
        output_file.write(workflow_template.render(**workflow))
        output_file.write("\n")
    return output_file_path

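
# Workflow definitions: plain data consumed by generate_workflow_file above.
# Entries that are commented out are not currently generated.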
WINDOWS_WORKFLOWS = [
    PyTorchWindowsWorkflow(
        build_environment="pytorch-win-vs2019-cpu-py3",
    )
]

LINUX_WORKFLOWS = [
    PyTorchLinuxWorkflow(
        build_environment="pytorch-linux-xenial-py3.6-gcc5.4",
        docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
        on_pull_request=True,
        enable_doc_jobs=True,
    ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-paralleltbb-linux-xenial-py3.6-gcc5.4",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-parallelnative-linux-xenial-py3.6-gcc5.4",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-pure_torch-linux-xenial-py3.6-gcc5.4",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-gcc7",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc7",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang5-asan",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-asan",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang7-onnx",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang7-onnx",
    # ),
    PyTorchLinuxWorkflow(
        build_environment="pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7",
        docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7",
    ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-bionic-py3.6-clang9-noarch",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.6-clang9",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-xla-linux-bionic-py3.6-clang9",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.6-clang9",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-vulkan-linux-bionic-py3.6-clang9",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.6-clang9",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-bionic-py3.8-gcc9-coverage",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.8-gcc9",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-bionic-rocm3.9-py3.6",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-rocm3.9-py3.6",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-x86_32",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-x86_64",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-arm-v7a",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-arm-v8a",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang5-mobile",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-asan",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang5-mobile-custom-dynamic",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang5-mobile-custom-static",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
    # ),
    # PyTorchLinuxWorkflow(
    #     build_environment="pytorch-linux-xenial-py3.6-clang5-mobile-code-analysis",
    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
    # ),
]

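
# Render every workflow against its template. Jinja's variable delimiter is
# changed to "!{{" so template expressions don't collide with GitHub Actions'
# own ${{ ... }} expression syntax.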
if __name__ == "__main__":
    jinja_env = jinja2.Environment(
        variable_start_string="!{{",
        loader=jinja2.FileSystemLoader(str(GITHUB_DIR.joinpath("templates"))),
    )
    template_and_workflows = [
        (jinja_env.get_template("linux_ci_workflow.yml.in"), LINUX_WORKFLOWS),
        (jinja_env.get_template("windows_ci_workflow.yml.in"), WINDOWS_WORKFLOWS)
    ]
    for template, workflows in template_and_workflows:
        for workflow in workflows:
            print(generate_workflow_file(workflow=workflow, workflow_template=template))