Mirror of https://github.com/pytorch/pytorch.git
Revert D33851316: ci: Migrate macOS x86_64 binary builds to GHA
Test Plan: revert-hammer

Differential Revision: D33851316 (c2e63b43ce)

Original commit changeset: 3c953f0e4e4b

Original Phabricator Diff: D33851316 (c2e63b43ce)

fbshipit-source-id: d95670332bbe44725b589e6d895f99b6d8821024
(cherry picked from commit 5f1861d777b913a94be7844e5eef28b53ab7010d)
Committed by: PyTorch MergeBot
Parent: 14538fa7bf
Commit: 2a391284fc
@@ -31,6 +31,23 @@ def get_processor_arch_name(gpu_version):
)

CONFIG_TREE_DATA = OrderedDict(
    macos=([None], OrderedDict(
        wheel=dimensions.STANDARD_PYTHON_VERSIONS,
        conda=dimensions.STANDARD_PYTHON_VERSIONS,
        libtorch=[
            "3.7",
        ],
    )),
    macos_arm64=([None], OrderedDict(
        wheel=[
            "3.8",
            "3.9",
        ],
        conda=[
            "3.8",
            "3.9",
        ],
    )),
    windows=(
        # Stop building Win+CU102, see https://github.com/pytorch/pytorch/issues/65648
        [v for v in dimensions.GPU_VERSIONS if v not in dimensions.ROCM_VERSION_LABELS and v != "cuda102"],
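The tree restored above maps each OS to a pair of (GPU versions, package type to Python versions). As a rough illustration of how such a structure can be flattened into individual build configurations, here is a minimal standalone sketch; the `flatten` helper and the inlined version list are illustrative assumptions, not the actual cimodel traversal code:

from collections import OrderedDict

# Assumed stand-in for dimensions.STANDARD_PYTHON_VERSIONS, for illustration only.
STANDARD_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"]

CONFIG_TREE_DATA = OrderedDict(
    macos=([None], OrderedDict(
        wheel=STANDARD_PYTHON_VERSIONS,
        conda=STANDARD_PYTHON_VERSIONS,
        libtorch=["3.7"],
    )),
)

def flatten(tree):
    # Walk os -> (gpu_versions, package_type -> python_versions) and yield tuples.
    for os_name, (_, package_types) in tree.items():
        for package_type, pythons in package_types.items():
            for python in pythons:
                yield (os_name, package_type, python)

print(list(flatten(CONFIG_TREE_DATA)))
# [('macos', 'wheel', '3.7'), ..., ('macos', 'libtorch', '3.7')]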
.circleci/config.yml (generated, 393 changes)
@@ -1678,6 +1678,136 @@ jobs:
workflows:
  binary_builds:
    jobs:
      - binary_mac_build:
          name: binary_macos_wheel_3_7_cpu_nightly_build
          build_environment: "wheel 3.7 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_mac_build:
          name: binary_macos_wheel_3_8_cpu_nightly_build
          build_environment: "wheel 3.8 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_mac_build:
          name: binary_macos_wheel_3_9_cpu_nightly_build
          build_environment: "wheel 3.9 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_mac_build:
          name: binary_macos_wheel_3_10_cpu_nightly_build
          build_environment: "wheel 3.10 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_mac_build:
          name: binary_macos_conda_3_7_cpu_nightly_build
          build_environment: "conda 3.7 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_mac_build:
          name: binary_macos_conda_3_8_cpu_nightly_build
          build_environment: "conda 3.8 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_mac_build:
          name: binary_macos_conda_3_9_cpu_nightly_build
          build_environment: "conda 3.9 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_mac_build:
          name: binary_macos_conda_3_10_cpu_nightly_build
          build_environment: "conda 3.10 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_mac_build:
          name: binary_macos_libtorch_3_7_cpu_nightly_build
          build_environment: "libtorch 3.7 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_macos_arm64_build:
          name: binary_macos_arm64_wheel_3_8_cpu_nightly_build
          build_environment: "wheel 3.8 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_macos_arm64_build:
          name: binary_macos_arm64_wheel_3_9_cpu_nightly_build
          build_environment: "wheel 3.9 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_macos_arm64_build:
          name: binary_macos_arm64_conda_3_8_cpu_nightly_build
          build_environment: "conda 3.8 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_macos_arm64_build:
          name: binary_macos_arm64_conda_3_9_cpu_nightly_build
          build_environment: "conda 3.9 cpu"
          filters:
            branches:
              only:
                - /.*/
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
      - binary_windows_build:
          name: binary_windows_conda_3_7_cpu_nightly_build
          build_environment: "conda 3.7 cpu"
@@ -2042,6 +2172,188 @@ workflows:
          requires:
            - binary_windows_conda_3_10_cu115_nightly_build
          executor: windows-with-nvidia-gpu
      - binary_upload:
          name: binary_macos_wheel_3_7_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_wheel_3_7_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: wheel
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_wheel_3_8_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_wheel_3_8_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: wheel
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_wheel_3_9_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_wheel_3_9_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: wheel
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_wheel_3_10_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_wheel_3_10_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: wheel
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_conda_3_7_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_conda_3_7_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: conda
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_conda_3_8_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_conda_3_8_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: conda
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_conda_3_9_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_conda_3_9_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: conda
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_conda_3_10_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_conda_3_10_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: conda
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_libtorch_3_7_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_libtorch_3_7_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: libtorch
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_arm64_wheel_3_8_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_arm64_wheel_3_8_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: wheel
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_arm64_wheel_3_9_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_arm64_wheel_3_9_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: wheel
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_arm64_conda_3_8_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_arm64_conda_3_8_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: conda
          upload_subfolder: cpu
      - binary_upload:
          name: binary_macos_arm64_conda_3_9_cpu_nightly_upload
          context: org-member
          requires:
            - binary_macos_arm64_conda_3_9_cpu_nightly_build
          filters:
            branches:
              only:
                - nightly
            tags:
              only:
                - /v[0-9]+(\.[0-9]+)*-rc[0-9]+/
          package_type: conda
          upload_subfolder: cpu
      - binary_upload:
          name: binary_windows_conda_3_7_cpu_nightly_upload
          context: org-member
@@ -2498,6 +2810,87 @@ workflows:
              only:
                - postnightly
          name: update_s3_htmls
      - smoke_mac_test:
          name: smoke_macos_wheel_3_7_cpu_nightly
          build_environment: "wheel 3.7 cpu"
          requires:
            - update_s3_htmls
          filters:
            branches:
              only:
                - postnightly
      - smoke_mac_test:
          name: smoke_macos_wheel_3_8_cpu_nightly
          build_environment: "wheel 3.8 cpu"
          requires:
            - update_s3_htmls
          filters:
            branches:
              only:
                - postnightly
      - smoke_mac_test:
          name: smoke_macos_wheel_3_9_cpu_nightly
          build_environment: "wheel 3.9 cpu"
          requires:
            - update_s3_htmls
          filters:
            branches:
              only:
                - postnightly
      - smoke_mac_test:
          name: smoke_macos_wheel_3_10_cpu_nightly
          build_environment: "wheel 3.10 cpu"
          requires:
            - update_s3_htmls
          filters:
            branches:
              only:
                - postnightly
      - smoke_mac_test:
          name: smoke_macos_conda_3_7_cpu_nightly
          build_environment: "conda 3.7 cpu"
          requires:
            - update_s3_htmls
          filters:
            branches:
              only:
                - postnightly
      - smoke_mac_test:
          name: smoke_macos_conda_3_8_cpu_nightly
          build_environment: "conda 3.8 cpu"
          requires:
            - update_s3_htmls
          filters:
            branches:
              only:
                - postnightly
      - smoke_mac_test:
          name: smoke_macos_conda_3_9_cpu_nightly
          build_environment: "conda 3.9 cpu"
          requires:
            - update_s3_htmls
          filters:
            branches:
              only:
                - postnightly
      - smoke_mac_test:
          name: smoke_macos_conda_3_10_cpu_nightly
          build_environment: "conda 3.10 cpu"
          requires:
            - update_s3_htmls
          filters:
            branches:
              only:
                - postnightly
      - smoke_mac_test:
          name: smoke_macos_libtorch_3_7_cpu_nightly
          build_environment: "libtorch 3.7 cpu"
          requires:
            - update_s3_htmls
          filters:
            branches:
              only:
                - postnightly
      - smoke_windows_test:
          name: smoke_windows_conda_3_7_cpu_nightly
          build_environment: "conda 3.7 cpu"
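Every restored entry above gates tag-triggered runs on the same release-candidate pattern. A standalone sketch (illustration only, not project code) of what the CircleCI filter /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ accepts:

import re

# The tag filter from the workflow entries above, anchored for a whole-string check.
RC_TAG = re.compile(r"v[0-9]+(\.[0-9]+)*-rc[0-9]+\Z")

for tag in ["v1.11.0-rc1", "v1.11-rc3", "v1.11.0", "v1.11.0-rc"]:
    print(tag, bool(RC_TAG.match(tag)))
# v1.11.0-rc1 True, v1.11-rc3 True, v1.11.0 False (no rc suffix), v1.11.0-rc False (rc needs digits)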
@@ -1,15 +1,28 @@
#!/bin/bash
set -eux -o pipefail

source "${BINARY_ENV_FILE:-/Users/distiller/project/env}"
source "/Users/distiller/project/env"
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"

# For some reason `unbuffer` breaks if we change the PATH here, so we
# write a script with the PATH change in it and unbuffer the whole
# thing
build_script="$workdir/build_script.sh"
touch "$build_script"
chmod +x "$build_script"

# Build
export USE_PYTORCH_METAL_EXPORT=1
export USE_COREML_DELEGATE=1
cat >"$build_script" <<EOL
export PATH="$workdir/miniconda/bin:$PATH"
if [[ "$CIRCLE_BRANCH" == "nightly" ]]; then
  export USE_PYTORCH_METAL_EXPORT=1
  export USE_COREML_DELEGATE=1
fi
if [[ "$PACKAGE_TYPE" == conda ]]; then
  "${BUILDER_ROOT}/conda/build_pytorch.sh"
  "$workdir/builder/conda/build_pytorch.sh"
else
  export TORCH_PACKAGE_NAME="$(echo $TORCH_PACKAGE_NAME | tr '-' '_')"
  "${BUILDER_ROOT}/wheel/build_wheel.sh"
  "$workdir/builder/wheel/build_wheel.sh"
fi
EOL
unbuffer "$build_script" | ts
.github/generated-ciflow-ruleset.json (generated, vendored, 20 changes)
@@ -60,33 +60,21 @@
      "linux-binary-libtorch-cxx11-abi",
      "linux-binary-libtorch-pre-cxx11",
      "linux-binary-manywheel",
      "macos-arm64-binary-conda",
      "macos-arm64-binary-wheel",
      "macos-binary-conda",
      "macos-binary-libtorch-cxx11-abi",
      "macos-binary-libtorch-pre-cxx11",
      "macos-binary-wheel",
      "windows-binary-libtorch-cxx11-abi",
      "windows-binary-libtorch-pre-cxx11",
      "windows-binary-wheel"
    ],
    "ciflow/binaries_conda": [
      "linux-binary-conda",
      "macos-arm64-binary-conda",
      "macos-binary-conda"
      "linux-binary-conda"
    ],
    "ciflow/binaries_libtorch": [
      "linux-binary-libtorch-cxx11-abi",
      "linux-binary-libtorch-pre-cxx11",
      "macos-binary-libtorch-cxx11-abi",
      "macos-binary-libtorch-pre-cxx11",
      "windows-binary-libtorch-cxx11-abi",
      "windows-binary-libtorch-pre-cxx11"
    ],
    "ciflow/binaries_wheel": [
      "linux-binary-manywheel",
      "macos-arm64-binary-wheel",
      "macos-binary-wheel",
      "windows-binary-wheel"
    ],
    "ciflow/cpu": [
@@ -140,12 +128,6 @@
      "linux-xenial-py3.7-gcc5.4",
      "linux-xenial-py3.7-gcc7",
      "linux-xenial-py3.7-gcc7-no-ops",
      "macos-arm64-binary-conda",
      "macos-arm64-binary-wheel",
      "macos-binary-conda",
      "macos-binary-libtorch-cxx11-abi",
      "macos-binary-libtorch-pre-cxx11",
      "macos-binary-wheel",
      "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single",
      "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single-full-jit",
      "win-vs2019-cpu-py3",
.github/scripts/generate_binary_build_matrix.py (vendored, 21 changes)
@@ -79,15 +79,12 @@ def list_without(in_list: List[str], without: List[str]) -> List[str]:
def generate_conda_matrix(os: str) -> List[Dict[str, str]]:
    ret: List[Dict[str, str]] = []
    arches = ["cpu"]
    python_versions = FULL_PYTHON_VERSIONS
    if os == "linux":
        arches += CUDA_ARCHES
    elif os == "windows":
        # We don't build CUDA 10.2 for window see https://github.com/pytorch/pytorch/issues/65648
        arches += list_without(CUDA_ARCHES, ["10.2"])
    elif os == "macos-arm64":
        python_versions = list_without(python_versions, ["3.7"])
    for python_version in python_versions:
    for python_version in FULL_PYTHON_VERSIONS:
        # We don't currently build conda packages for rocm
        for arch_version in arches:
            gpu_arch_type = arch_type(arch_version)
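The hunk relies on list_without to subtract items such as "3.7" or "10.2" from a version list. Its body is not shown in this diff; a plausible one-line implementation matching the signature above (an assumption, not the confirmed source) would be:

from typing import List

def list_without(in_list: List[str], without: List[str]) -> List[str]:
    # Assumed implementation: preserve order, drop any item found in `without`.
    return [item for item in in_list if item not in without]

assert list_without(["3.7", "3.8", "3.9"], ["3.7"]) == ["3.8", "3.9"]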
@@ -156,7 +153,6 @@ def generate_libtorch_matrix(os: str, abi_version: str) -> List[Dict[str, str]]:
def generate_wheels_matrix(os: str) -> List[Dict[str, str]]:
    arches = ["cpu"]
    package_type = "wheel"
    python_versions = FULL_PYTHON_VERSIONS
    if os == "linux":
        arches += CUDA_ARCHES + ROCM_ARCHES
        # NOTE: We only build manywheel packages for linux
@@ -164,10 +160,8 @@ def generate_wheels_matrix(os: str) -> List[Dict[str, str]]:
    elif os == "windows":
        # We don't build CUDA 10.2 for window see https://github.com/pytorch/pytorch/issues/65648
        arches += list_without(CUDA_ARCHES, ["10.2"])
    elif os == "macos-arm64":
        python_versions = list_without(python_versions, ["3.7"])
    ret: List[Dict[str, str]] = []
    for python_version in python_versions:
    for python_version in FULL_PYTHON_VERSIONS:
        for arch_version in arches:
            gpu_arch_type = arch_type(arch_version)
            gpu_arch_version = "" if arch_version == "cpu" else arch_version
@@ -187,3 +181,14 @@ def generate_wheels_matrix(os: str) -> List[Dict[str, str]]:
                }
            )
    return ret


def generate_binary_build_matrix(os: str) -> List[Dict[str, str]]:
    return {
        "linux": [
            *generate_conda_matrix(os),
            *generate_libtorch_matrix(os, abi_version=PRE_CXX11_ABI),
            *generate_libtorch_matrix(os, abi_version=CXX11_ABI),
            *generate_wheels_matrix(os),
        ]
    }[os]
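The restored generate_binary_build_matrix dispatches through a dict literal indexed by os, so every matrix in the dict is built eagerly before the lookup, and an unsupported os raises KeyError. A self-contained miniature of the same pattern (the stub generator is an illustration, not the real one):

from typing import Dict, List

def conda_matrix(os: str) -> List[Dict[str, str]]:
    # Stand-in for generate_conda_matrix; returns a single config for brevity.
    return [{"os": os, "package_type": "conda"}]

def binary_build_matrix(os: str) -> List[Dict[str, str]]:
    # Dict-literal dispatch: the value is evaluated before indexing by `os`.
    return {
        "linux": [*conda_matrix(os)],
    }[os]

print(binary_build_matrix("linux"))  # [{'os': 'linux', 'package_type': 'conda'}]
# binary_build_matrix("macos") raises KeyError, matching the post-revert state.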
.github/scripts/generate_ci_workflows.py (vendored, 72 changes)
@@ -295,9 +295,6 @@ class BinaryBuildWorkflow:
    abi_version: str = ''
    ciflow_config: CIFlowConfig = field(default_factory=CIFlowConfig)
    is_scheduled: str = ''
    # Mainly for macos
    cross_compile_arm64: bool = False
    xcode_version: str = ''

    def __post_init__(self) -> None:
        if self.abi_version:
@@ -305,6 +302,7 @@ class BinaryBuildWorkflow:
        else:
            self.build_environment = f"{self.os}-binary-{self.package_type}"


    def generate_workflow_file(self, workflow_template: jinja2.Template) -> None:
        output_file_path = GITHUB_DIR / f"workflows/generated-{self.build_environment}.yml"
        with open(output_file_path, "w") as output_file:
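__post_init__ derives build_environment from the workflow's fields; the else branch shown yields names such as macos-binary-wheel, and, judging from entries like "macos-binary-libtorch-cxx11-abi" in the ruleset above, the abi_version branch presumably appends the ABI. A minimal sketch of that naming scheme (the abi branch is an assumption inferred from the generated names):

from dataclasses import dataclass

@dataclass
class BinaryBuildWorkflowSketch:
    os: str
    package_type: str
    abi_version: str = ''

    def __post_init__(self) -> None:
        if self.abi_version:
            # Assumed from generated names like "macos-binary-libtorch-cxx11-abi".
            self.build_environment = f"{self.os}-binary-{self.package_type}-{self.abi_version}"
        else:
            self.build_environment = f"{self.os}-binary-{self.package_type}"

print(BinaryBuildWorkflowSketch("macos", "wheel").build_environment)  # macos-binary-wheel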
@@ -861,8 +859,6 @@ DOCKER_WORKFLOWS = [
class OperatingSystem:
    LINUX = "linux"
    WINDOWS = "windows"
    MACOS = "macos"
    MACOS_ARM64 = "macos-arm64"

LINUX_BINARY_BUILD_WORFKLOWS = [
    BinaryBuildWorkflow(
@@ -956,71 +952,6 @@ WINDOWS_BINARY_BUILD_WORKFLOWS = [
    ),
]

MACOS_BINARY_BUILD_WORKFLOWS = [
    BinaryBuildWorkflow(
        os=OperatingSystem.MACOS,
        package_type="wheel",
        build_configs=generate_binary_build_matrix.generate_wheels_matrix(OperatingSystem.MACOS),
        ciflow_config=CIFlowConfig(
            labels={LABEL_CIFLOW_DEFAULT, LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
            isolated_workflow=True,
        ),
    ),
    BinaryBuildWorkflow(
        os=OperatingSystem.MACOS,
        package_type="conda",
        build_configs=generate_binary_build_matrix.generate_conda_matrix(OperatingSystem.MACOS),
        ciflow_config=CIFlowConfig(
            labels={LABEL_CIFLOW_DEFAULT, LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_CONDA},
            isolated_workflow=True,
        ),
    ),
    BinaryBuildWorkflow(
        os=OperatingSystem.MACOS,
        package_type="libtorch",
        abi_version=generate_binary_build_matrix.CXX11_ABI,
        build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
            OperatingSystem.MACOS, generate_binary_build_matrix.CXX11_ABI
        ),
        ciflow_config=CIFlowConfig(
            labels={LABEL_CIFLOW_DEFAULT, LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
            isolated_workflow=True,
        ),
    ),
    BinaryBuildWorkflow(
        os=OperatingSystem.MACOS,
        package_type="libtorch",
        abi_version=generate_binary_build_matrix.PRE_CXX11_ABI,
        build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
            OperatingSystem.MACOS, generate_binary_build_matrix.PRE_CXX11_ABI
        ),
        ciflow_config=CIFlowConfig(
            labels={LABEL_CIFLOW_DEFAULT, LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
            isolated_workflow=True,
        ),
    ),
    BinaryBuildWorkflow(
        os=OperatingSystem.MACOS_ARM64,
        package_type="wheel",
        build_configs=generate_binary_build_matrix.generate_wheels_matrix(OperatingSystem.MACOS),
        cross_compile_arm64=True,
        ciflow_config=CIFlowConfig(
            labels={LABEL_CIFLOW_DEFAULT, LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
            isolated_workflow=True,
        ),
    ),
    BinaryBuildWorkflow(
        os=OperatingSystem.MACOS_ARM64,
        package_type="conda",
        cross_compile_arm64=True,
        build_configs=generate_binary_build_matrix.generate_conda_matrix(OperatingSystem.MACOS_ARM64),
        ciflow_config=CIFlowConfig(
            labels={LABEL_CIFLOW_DEFAULT, LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_CONDA},
            isolated_workflow=True,
        ),
    ),
]

def main() -> None:
    jinja_env = jinja2.Environment(
        variable_start_string="!{{",
@@ -1038,7 +969,6 @@ def main() -> None:
        (jinja_env.get_template("android_ci_workflow.yml.j2"), ANDROID_SHORT_WORKFLOWS),
        (jinja_env.get_template("linux_binary_build_workflow.yml.j2"), LINUX_BINARY_BUILD_WORFKLOWS),
        (jinja_env.get_template("windows_binary_build_workflow.yml.j2"), WINDOWS_BINARY_BUILD_WORKFLOWS),
        (jinja_env.get_template("macos_binary_build_workflow.yml.j2"), MACOS_BINARY_BUILD_WORKFLOWS),
    ]
    # Delete the existing generated files first, this should align with .gitattributes file description.
    existing_workflows = GITHUB_DIR.glob("workflows/generated-*")
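main configures Jinja with variable_start_string="!{{" so that template substitutions (!{{ ... }}) cannot collide with GitHub Actions' own ${{ ... }} expressions, which must survive rendering verbatim. A small sketch of the effect (illustrative; this is not the project's generator, and the default "}}" end delimiter is assumed):

import jinja2

env = jinja2.Environment(variable_start_string="!{{")
template = env.from_string(
    "name: !{{ build_environment }}\ntoken: ${{ secrets.GITHUB_TOKEN }}"
)
print(template.render(build_environment="macos-binary-wheel"))
# name: macos-binary-wheel
# token: ${{ secrets.GITHUB_TOKEN }}   <- left untouched for GitHub Actions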
.github/templates/common.yml.j2 (vendored, 4 changes)
@@ -353,15 +353,13 @@ concurrency:
      ./build_docker.sh
{%- endmacro -%}

{%- macro setup_miniconda(python_version, activate_environment=True) -%}
{%- macro setup_miniconda(python_version) -%}
  - name: Setup miniconda
    uses: conda-incubator/setup-miniconda@v2
    with:
      auto-update-conda: true
      python-version: !{{ python_version }}
{%- if activate_environment %}
      activate-environment: build
{%- endif %}
{%- endmacro -%}

{%- macro set_xcode_version(xcode_version) -%}
.github/templates/macos_binary_build_workflow.yml.j2 (vendored, 181 changes)
@@ -1,181 +0,0 @@
{% import 'common.yml.j2' as common %}

{%- block name -%}
# Template is at:    .github/templates/macos_binary_build_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: !{{ build_environment }}
{%- endblock %}

{%- macro binary_env(config) -%}
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: !{{ config["package_type"] }}
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
{%- if config["package_type"] == "libtorch" %}
      LIBTORCH_VARIANT: !{{ config["libtorch_variant"] }}
      DESIRED_DEVTOOLSET: !{{ config["devtoolset"] }}
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.7"
{%- else %}
      DESIRED_PYTHON: "!{{ config["python_version"] }}"
{%- endif %}
{%- endmacro %}

{%- macro set_runner_specific_vars() -%}
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
{%- endmacro %}

on:
  # TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
  push:
    # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
    branches:
      - nightly
    tags:
      # NOTE: Binary build pipelines should only get triggered on release candidate builds
      # Release candidate tags look like: v1.11.0-rc1
      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
{%- for label in ciflow_config.labels | sort %}
{%- if label != "ciflow/default" %}
      - '!{{ label }}/*'
{%- endif %}
{%- endfor %}
  workflow_dispatch:

env:
  # Needed for conda builds
  ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
  ANACONDA_USER: pytorch
  AWS_DEFAULT_REGION: us-east-1
  BUILD_ENVIRONMENT: !{{ build_environment }}
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  IN_CI: 1
  IS_GHA: 1
  PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
  PR_NUMBER: ${{ github.event.pull_request.number }}
  SKIP_ALL_TESTS: 1
{%- if cross_compile_arm64 %}
  CROSS_COMPILE_ARM64: 1
{% endif %}
!{{ common.concurrency(build_environment) }}

jobs:
{%- for config in build_configs %}
  !{{ config["build_name"] }}-build:
    runs-on: macos-10.15
{%- if config["package_type"] == "libtorch" %}
    # libtorch builds take a long time on github hosted runners
    timeout-minutes: 720
{%- else %}
    timeout-minutes: !{{ common.timeout_minutes }}
{%- endif %}
    !{{ binary_env(config) }}
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      !{{ set_runner_specific_vars() }}
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: !{{ config["build_name"] }}
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  !{{ config["build_name"] }}-upload:  # Uploading
    runs-on: linux.2xlarge  # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: !{{ config["build_name"] }}-build
    !{{ binary_env(config) }}
    steps:
      !{{ common.setup_ec2_linux() }}
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: !{{ config["build_name"] }}
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      !{{ common.teardown_ec2_linux() }}
{%- endfor %}
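Each loop iteration above keys the job names and artifact names off config["build_name"]; the generated files below show values such as conda-py3_8-cpu and wheel-py3_7-cpu. A hedged sketch of how such a name could be derived from a matrix entry (the exact helper in generate_binary_build_matrix.py is not shown in this diff, so this is an assumption fitted to the observed names):

def build_name(package_type: str, python_version: str, gpu_arch: str) -> str:
    # Assumption based on observed names such as "conda-py3_8-cpu":
    # dots in the Python version become underscores and the arch is appended.
    return f"{package_type}-py{python_version.replace('.', '_')}-{gpu_arch}"

assert build_name("conda", "3.8", "cpu") == "conda-py3_8-cpu"
assert build_name("wheel", "3.7", "cpu") == "wheel-py3_7-cpu"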
.github/workflows/generated-macos-arm64-binary-conda.yml (generated, vendored, 575 changes)
@@ -1,575 +0,0 @@
# @generated DO NOT EDIT MANUALLY
# Template is at:    .github/templates/macos_binary_build_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: macos-arm64-binary-conda

on:
  # TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
  push:
    # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
    branches:
      - nightly
    tags:
      # NOTE: Binary build pipelines should only get triggered on release candidate builds
      # Release candidate tags look like: v1.11.0-rc1
      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
      - 'ciflow/binaries/*'
      - 'ciflow/binaries_conda/*'
  workflow_dispatch:

env:
  # Needed for conda builds
  ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
  ANACONDA_USER: pytorch
  AWS_DEFAULT_REGION: us-east-1
  BUILD_ENVIRONMENT: macos-arm64-binary-conda
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  IN_CI: 1
  IS_GHA: 1
  PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
  PR_NUMBER: ${{ github.event.pull_request.number }}
  SKIP_ALL_TESTS: 1
  CROSS_COMPILE_ARM64: 1

concurrency:
  group: macos-arm64-binary-conda-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
  cancel-in-progress: true

jobs:
  conda-py3_8-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.8"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: conda-py3_8-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  conda-py3_8-cpu-upload:  # Uploading
    runs-on: linux.2xlarge  # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: conda-py3_8-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.8"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: conda-py3_8-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
  conda-py3_9-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.9"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: conda-py3_9-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  conda-py3_9-cpu-upload:  # Uploading
    runs-on: linux.2xlarge  # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: conda-py3_9-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.9"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: conda-py3_9-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
  conda-py3_10-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.10"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      #       They are also here because setting them at a workflow level doesn't give us access to the
      #       runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: conda-py3_10-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  conda-py3_10-cpu-upload:  # Uploading
    runs-on: linux.2xlarge  # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: conda-py3_10-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.10"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: conda-py3_10-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
.github/workflows/generated-macos-arm64-binary-wheel.yml (generated, vendored, 754 changes)
@ -1,754 +0,0 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/macos_binary_build_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: macos-arm64-binary-wheel

on:
  # TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
  push:
    # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
    branches:
      - nightly
    tags:
      # NOTE: Binary build pipelines should only get triggered on release candidate builds
      # Release candidate tags look like: v1.11.0-rc1
      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
      - 'ciflow/binaries/*'
      - 'ciflow/binaries_wheel/*'
  workflow_dispatch:

env:
  # Needed for conda builds
  ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
  ANACONDA_USER: pytorch
  AWS_DEFAULT_REGION: us-east-1
  BUILD_ENVIRONMENT: macos-arm64-binary-wheel
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  IN_CI: 1
  IS_GHA: 1
  PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
  PR_NUMBER: ${{ github.event.pull_request.number }}
  SKIP_ALL_TESTS: 1
  CROSS_COMPILE_ARM64: 1

concurrency:
  group: macos-arm64-binary-wheel-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
  cancel-in-progress: true

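As a rough sketch of how that concurrency group key resolves (the SHA below is made up; real runs substitute actual event values):

    # Illustration of the group expression above: PR number wins, else the SHA.
    pr_number=""          # empty on push events
    sha="5f1861d"         # example commit SHA, not a real run
    event_name="push"
    dispatch=false
    [ "${event_name}" = "workflow_dispatch" ] && dispatch=true
    group="macos-arm64-binary-wheel-${pr_number:-${sha}}-${dispatch}"
    echo "${group}"       # -> macos-arm64-binary-wheel-5f1861d-false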
jobs:
  wheel-py3_7-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: wheel
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.7"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: wheel-py3_7-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
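The two "Populate binary env" steps and the conda install above rely on GitHub's file-based mechanism for passing state between steps; a minimal sketch of that mechanism, using placeholder paths (the runner provides the real files):

    # Appending KEY=VALUE to the file named by GITHUB_ENV exports the variable
    # to all *subsequent* steps; GITHUB_PATH does the same for PATH entries.
    GITHUB_ENV="/tmp/github_env"    # placeholder path
    GITHUB_PATH="/tmp/github_path"  # placeholder path
    echo "BINARY_ENV_FILE=/tmp/env" >> "${GITHUB_ENV}"
    echo "/opt/anaconda/bin" >> "${GITHUB_PATH}"
    # The current step does NOT see these yet; the runner re-reads both files
    # before launching the next step.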
  wheel-py3_7-cpu-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: wheel-py3_7-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: wheel
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.7"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: wheel-py3_7-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
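The inline retry helper used in the upload jobs above is hard-coded at three attempts; an equivalent, slightly more general sketch (the loop form is an assumption, not what CI actually runs):

    # Same effective behavior as: "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
    retry () {
      local delay
      for delay in 0 1 2; do
        sleep "${delay}"
        "$@" && return 0
      done
      return 1
    }
    retry docker pull "${ALPINE_IMAGE:-alpine:3.15}"   # example invocation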
  wheel-py3_8-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: wheel
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.8"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: wheel-py3_8-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  wheel-py3_8-cpu-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: wheel-py3_8-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: wheel
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.8"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: wheel-py3_8-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
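On the cleanup step above: the shellcheck suppression exists because $(docker ps -q) may expand to nothing. A hedged alternative that avoids the unquoted expansion entirely (assumes GNU xargs, which these linux.2xlarge runners would have):

    # Equivalent cleanup without relying on word-splitting (avoids SC2046):
    docker ps -q | xargs --no-run-if-empty docker stop || true
    docker system prune -af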
  wheel-py3_9-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: wheel
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.9"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: wheel-py3_9-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  wheel-py3_9-cpu-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: wheel-py3_9-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: wheel
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.9"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: wheel-py3_9-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
  wheel-py3_10-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: wheel
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.10"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: wheel-py3_10-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  wheel-py3_10-cpu-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: wheel-py3_10-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: wheel
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.10"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: wheel-py3_10-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
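A quick note on the ECR login used throughout the upload jobs above: the account id is scraped from the pretty-printed text output of aws sts get-caller-identity rather than parsed as JSON. A sketch of what that pipeline extracts, against a fabricated sample payload:

    # aws sts get-caller-identity pretty-prints JSON one key per line, e.g.:
    json='{
        "UserId": "AIDAEXAMPLE",
        "Account": "123456789012",
        "Arn": "arn:aws:iam::123456789012:user/ci"
    }'
    # Field 4 when splitting the Account line on double quotes is the id itself:
    printf '%s\n' "$json" | grep Account | cut -f4 -d\"   # -> 123456789012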
.github/workflows/generated-macos-binary-conda.yml (752 lines, generated, vendored)
@@ -1,752 +0,0 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/macos_binary_build_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: macos-binary-conda

on:
  # TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
  push:
    # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
    branches:
      - nightly
    tags:
      # NOTE: Binary build pipelines should only get triggered on release candidate builds
      # Release candidate tags look like: v1.11.0-rc1
      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
      - 'ciflow/binaries/*'
      - 'ciflow/binaries_conda/*'
  workflow_dispatch:

env:
  # Needed for conda builds
  ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
  ANACONDA_USER: pytorch
  AWS_DEFAULT_REGION: us-east-1
  BUILD_ENVIRONMENT: macos-binary-conda
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  IN_CI: 1
  IS_GHA: 1
  PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
  PR_NUMBER: ${{ github.event.pull_request.number }}
  SKIP_ALL_TESTS: 1
concurrency:
  group: macos-binary-conda-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
  cancel-in-progress: true

jobs:
  conda-py3_7-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.7"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: conda-py3_7-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  conda-py3_7-cpu-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: conda-py3_7-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.7"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: conda-py3_7-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
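One detail of the "Upload binaries" step worth calling out: passing -e NAME with no value tells docker to copy NAME from the host environment into the container, so the secret values never appear on the command line. A minimal sketch (the image tag is an arbitrary example):

    # -e NAME (no "=value") inherits the variable from the calling environment;
    # -e PKG_DIR=/artifacts sets an explicit override instead.
    export UPLOAD_CHANNEL=test
    docker run --rm -e UPLOAD_CHANNEL -e PKG_DIR=/artifacts alpine:3.15 \
      sh -c 'echo "channel=$UPLOAD_CHANNEL dir=$PKG_DIR"'
    # -> channel=test dir=/artifacts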
  conda-py3_8-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.8"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: conda-py3_8-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  conda-py3_8-cpu-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: conda-py3_8-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.8"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: conda-py3_8-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
  conda-py3_9-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.9"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: conda-py3_9-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  conda-py3_9-cpu-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: conda-py3_9-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.9"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: conda-py3_9-cpu
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
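The get_ec2_metadata helper that opens every upload job is a thin wrapper over the EC2 instance metadata service; a usage sketch with one more documented IMDS category (the output value shown is invented):

    get_ec2_metadata() {
      # Queries the (IMDSv1) instance metadata endpoint; only works on EC2.
      category=$1
      curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
    }
    get_ec2_metadata local-hostname   # e.g. ip-10-0-0-12.ec2.internal (example)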
  conda-py3_10-cpu-build:
    runs-on: macos-10.15
    timeout-minutes: 240
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.10"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: conda-py3_10-cpu
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  conda-py3_10-cpu-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: conda-py3_10-cpu-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: conda
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      DESIRED_PYTHON: "3.10"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Preserve github env variables for use in docker
|
||||
run: |
|
||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: conda-py3_10-cpu
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Set DRY_RUN (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
|
||||
- name: Set UPLOAD_CHANNEL (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
# reference ends with an RC suffix
|
||||
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
|
||||
echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
|
||||
fi
|
||||
- name: Upload binaries
|
||||
env:
|
||||
PKG_DIR: "${{ runner.temp }}/artifacts"
|
||||
UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
|
||||
# When running these on pull_request events these should be blank
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
|
||||
ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
||||
run: |
|
||||
docker run --rm -i \
|
||||
-e ANACONDA_API_TOKEN \
|
||||
-e AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY \
|
||||
-e DRY_RUN \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PKG_DIR=/artifacts \
|
||||
-e UPLOAD_CHANNEL \
|
||||
-e UPLOAD_SUBFOLDER \
|
||||
-v "${RUNNER_TEMP}/artifacts:/artifacts" \
|
||||
-v "${GITHUB_WORKSPACE}:/v" \
|
||||
-w /v \
|
||||
308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
|
||||
bash -c '.circleci/scripts/binary_upload.sh'
|
||||
- name: Hold runner for 2 hours or until ssh sessions have drained
|
||||
# Always hold for active ssh sessions
|
||||
if: always()
|
||||
run: .github/scripts/wait_for_ssh_to_drain.sh
|
||||
- name: Chown workspace
|
||||
if: always()
|
||||
run: |
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Kill containers, clean up images
|
||||
if: always()
|
||||
run: |
|
||||
# ignore expansion of "docker ps -q" since it could be empty
|
||||
# shellcheck disable=SC2046
|
||||
docker stop $(docker ps -q) || true
|
||||
# Prune all of the docker images
|
||||
docker system prune -af
|
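Every upload job above gates publishing on the pushed ref: DRY_RUN is only written on tagged pushes, and UPLOAD_CHANNEL is only switched when the tag carries an RC suffix. As a rough standalone sketch of that gating (the default values below are assumptions for illustration, not taken from the workflow):

```bash
#!/usr/bin/env bash
# Standalone sketch of the DRY_RUN / UPLOAD_CHANNEL gating in the upload jobs.
# GITHUB_REF and GITHUB_REF_NAME are normally supplied by GitHub Actions;
# the fallbacks here are an illustrative release-candidate tag.
GITHUB_REF="${GITHUB_REF:-refs/tags/v1.11.0-rc3}"
GITHUB_REF_NAME="${GITHUB_REF_NAME:-v1.11.0-rc3}"
DRY_RUN="enabled"         # assumed default; the workflow only ever writes "disabled"
UPLOAD_CHANNEL="nightly"  # assumed default channel when no RC suffix matches

# uploads go live only for tagged pushes
if [[ ${GITHUB_REF} == refs/tags/* ]]; then
  DRY_RUN="disabled"
fi
# reference ends with an RC suffix -> route to the test channel
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
  UPLOAD_CHANNEL="test"
fi
echo "DRY_RUN=${DRY_RUN} UPLOAD_CHANNEL=${UPLOAD_CHANNEL}"
```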
788  .github/workflows/generated-macos-binary-libtorch-cxx11-abi.yml  generated  vendored
@@ -1,788 +0,0 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/macos_binary_build_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: macos-binary-libtorch-cxx11-abi

on:
  # TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
  push:
    # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
    branches:
      - nightly
    tags:
      # NOTE: Binary build pipelines should only get triggered on release candidate builds
      # Release candidate tags look like: v1.11.0-rc1
      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
      - 'ciflow/binaries/*'
      - 'ciflow/binaries_libtorch/*'
  workflow_dispatch:

env:
  # Needed for conda builds
  ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
  ANACONDA_USER: pytorch
  AWS_DEFAULT_REGION: us-east-1
  BUILD_ENVIRONMENT: macos-binary-libtorch-cxx11-abi
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  IN_CI: 1
  IS_GHA: 1
  PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
  PR_NUMBER: ${{ github.event.pull_request.number }}
  SKIP_ALL_TESTS: 1
concurrency:
  group: macos-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
  cancel-in-progress: true

jobs:
  libtorch-cpu-shared-with-deps-cxx11-abi-build:
    runs-on: macos-10.15
    # libtorch builds take a long time on github hosted runners
    timeout-minutes: 720
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: libtorch
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      LIBTORCH_VARIANT: shared-with-deps
      DESIRED_DEVTOOLSET: cxx11-abi
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.7"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: libtorch-cpu-shared-with-deps-cxx11-abi
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  libtorch-cpu-shared-with-deps-cxx11-abi-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: libtorch-cpu-shared-with-deps-cxx11-abi-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: libtorch
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      LIBTORCH_VARIANT: shared-with-deps
      DESIRED_DEVTOOLSET: cxx11-abi
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.7"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: libtorch-cpu-shared-with-deps-cxx11-abi
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
  libtorch-cpu-shared-without-deps-cxx11-abi-build:
    runs-on: macos-10.15
    # libtorch builds take a long time on github hosted runners
    timeout-minutes: 720
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: libtorch
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      LIBTORCH_VARIANT: shared-without-deps
      DESIRED_DEVTOOLSET: cxx11-abi
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.7"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: libtorch-cpu-shared-without-deps-cxx11-abi
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  libtorch-cpu-shared-without-deps-cxx11-abi-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: libtorch-cpu-shared-without-deps-cxx11-abi-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: libtorch
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      LIBTORCH_VARIANT: shared-without-deps
      DESIRED_DEVTOOLSET: cxx11-abi
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.7"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: libtorch-cpu-shared-without-deps-cxx11-abi
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
  libtorch-cpu-static-with-deps-cxx11-abi-build:
    runs-on: macos-10.15
    # libtorch builds take a long time on github hosted runners
    timeout-minutes: 720
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: libtorch
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      LIBTORCH_VARIANT: static-with-deps
      DESIRED_DEVTOOLSET: cxx11-abi
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.7"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: libtorch-cpu-static-with-deps-cxx11-abi
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  libtorch-cpu-static-with-deps-cxx11-abi-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: libtorch-cpu-static-with-deps-cxx11-abi-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: libtorch
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      LIBTORCH_VARIANT: static-with-deps
      DESIRED_DEVTOOLSET: cxx11-abi
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.7"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: libtorch-cpu-static-with-deps-cxx11-abi
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
  libtorch-cpu-static-without-deps-cxx11-abi-build:
    runs-on: macos-10.15
    # libtorch builds take a long time on github hosted runners
    timeout-minutes: 720
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: libtorch
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      LIBTORCH_VARIANT: static-without-deps
      DESIRED_DEVTOOLSET: cxx11-abi
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.7"
      # For sccache access (only on non-forked PRs)
      AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
    steps:
      # NOTE: These environment variables are put here so that they can be applied on every job equally
      # They are also here because setting them at a workflow level doesn't give us access to the
      # runner.temp variable, which we need.
      - name: Populate binary env
        shell: bash
        run: |
          # shellcheck disable=SC2129
          echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
          # shellcheck disable=SC2129
          echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
      - name: Install conda and dependencies
        run: |
          # Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
          curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
          chmod +x "${RUNNER_TEMP}/conda.sh"
          /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
          echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: ${{ env.PYTORCH_ROOT }}
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: ${{ env.BUILDER_ROOT }}
      - name: Install sccache (only for non-forked PRs, and pushes to trunk)
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
        run: |
          sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
          sudo chmod +x /usr/local/bin/sccache
          echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
      - name: Populate binary env
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
      - name: Build PyTorch binary
        run: |
          # shellcheck disable=SC1091
          source "${RUNNER_TEMP}/anaconda/bin/activate"
          "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
      - uses: actions/upload-artifact@v2
        if: always()
        with:
          name: libtorch-cpu-static-without-deps-cxx11-abi
          retention-days: 14
          if-no-files-found: error
          path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
  libtorch-cpu-static-without-deps-cxx11-abi-upload: # Uploading
    runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: libtorch-cpu-static-without-deps-cxx11-abi-build
    env:
      PYTORCH_ROOT: ${{ github.workspace }}/pytorch
      BUILDER_ROOT: ${{ github.workspace }}/builder
      PACKAGE_TYPE: libtorch
      SKIP_ALL_TESTS: 1
      DESIRED_CUDA: cpu
      LIBTORCH_VARIANT: static-without-deps
      DESIRED_DEVTOOLSET: cxx11-abi
      # This is a dummy value for libtorch to work correctly with our batch scripts
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.7"
    steps:
      - name: Display EC2 information
        shell: bash
        run: |
          set -euo pipefail
          function get_ec2_metadata() {
            # Pulled from instance metadata endpoint for EC2
            # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
            category=$1
            curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
          }
          echo "ami-id: $(get_ec2_metadata ami-id)"
          echo "instance-id: $(get_ec2_metadata instance-id)"
          echo "instance-type: $(get_ec2_metadata instance-type)"
      - name: Log in to ECR
        env:
          AWS_RETRY_MODE: standard
          AWS_MAX_ATTEMPTS: 5
        run: |
          AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
            --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
      - name: Chown workspace
        run: |
          retry () {
            "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
          }
          retry docker pull "${ALPINE_IMAGE}"
          # Ensure the working directory gets chowned back to the current user
          docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Clean workspace
        run: |
          rm -rf "${GITHUB_WORKSPACE}"
          mkdir "${GITHUB_WORKSPACE}"
      - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
        uses: seemethere/add-github-ssh-key@v1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - uses: actions/download-artifact@v2
        name: Download Build Artifacts
        with:
          name: libtorch-cpu-static-without-deps-cxx11-abi
          path: "${{ runner.temp }}/artifacts/"
      - name: Set DRY_RUN (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
      - name: Set UPLOAD_CHANNEL (only for tagged pushes)
        if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
        run: |
          # reference ends with an RC suffix
          if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
            echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
          fi
      - name: Upload binaries
        env:
          PKG_DIR: "${{ runner.temp }}/artifacts"
          UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
          # When running these on pull_request events these should be blank
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
          ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
        run: |
          docker run --rm -i \
            -e ANACONDA_API_TOKEN \
            -e AWS_ACCESS_KEY_ID \
            -e AWS_SECRET_ACCESS_KEY \
            -e DRY_RUN \
            -e PACKAGE_TYPE \
            -e PKG_DIR=/artifacts \
            -e UPLOAD_CHANNEL \
            -e UPLOAD_SUBFOLDER \
            -v "${RUNNER_TEMP}/artifacts:/artifacts" \
            -v "${GITHUB_WORKSPACE}:/v" \
            -w /v \
            308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
            bash -c '.circleci/scripts/binary_upload.sh'
      - name: Hold runner for 2 hours or until ssh sessions have drained
        # Always hold for active ssh sessions
        if: always()
        run: .github/scripts/wait_for_ssh_to_drain.sh
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Kill containers, clean up images
        if: always()
        run: |
          # ignore expansion of "docker ps -q" since it could be empty
          # shellcheck disable=SC2046
          docker stop $(docker ps -q) || true
          # Prune all of the docker images
          docker system prune -af
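Both deleted workflows repeat the same inline retry helper ahead of flaky network calls (ECR login, docker pull). Extracted on its own it is just two bounded retries with short sleeps; a minimal sketch with an illustrative command:

```bash
#!/usr/bin/env bash
# The inline retry helper the upload jobs define before ECR login and
# docker pull: run the command, retry after 1s, then after 2s, then fail.
retry () {
  "$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
}

# Illustrative usage; any flaky command can be wrapped the same way.
retry curl -fsSL https://example.com -o /dev/null
```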
788  .github/workflows/generated-macos-binary-libtorch-pre-cxx11.yml  generated  vendored
@@ -1,788 +0,0 @@
|
||||
# @generated DO NOT EDIT MANUALLY
|
||||
# Template is at: .github/templates/macos_binary_build_workflow.yml.j2
|
||||
# Generation script: .github/scripts/generate_ci_workflows.py
|
||||
name: macos-binary-libtorch-pre-cxx11
|
||||
|
||||
on:
|
||||
# TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
|
||||
push:
|
||||
# NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
|
||||
branches:
|
||||
- nightly
|
||||
tags:
|
||||
# NOTE: Binary build pipelines should only get triggered on release candidate builds
|
||||
# Release candidate tags look like: v1.11.0-rc1
|
||||
- v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
|
||||
- 'ciflow/binaries/*'
|
||||
- 'ciflow/binaries_libtorch/*'
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
# Needed for conda builds
|
||||
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
|
||||
ANACONDA_USER: pytorch
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
BUILD_ENVIRONMENT: macos-binary-libtorch-pre-cxx11
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
IN_CI: 1
|
||||
IS_GHA: 1
|
||||
PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SKIP_ALL_TESTS: 1
|
||||
concurrency:
|
||||
group: macos-binary-libtorch-pre-cxx11-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
libtorch-cpu-shared-with-deps-pre-cxx11-build:
|
||||
runs-on: macos-10.15
|
||||
# libtorch builds take a long time on github hosted runners
|
||||
timeout-minutes: 720
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: libtorch
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
LIBTORCH_VARIANT: shared-with-deps
|
||||
DESIRED_DEVTOOLSET: pre-cxx11
|
||||
# This is a dummy value for libtorch to work correctly with our batch scripts
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.7"
|
||||
# For sccache access (only on non-forked PRs)
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
# shellcheck disable=SC2129
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
|
||||
- name: Install conda and dependencies
|
||||
run: |
|
||||
# Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
|
||||
curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x "${RUNNER_TEMP}/conda.sh"
|
||||
/bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
|
||||
echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: ${{ env.PYTORCH_ROOT }}
|
||||
submodules: recursive
|
||||
- name: Clone pytorch/builder
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: pytorch/builder
|
||||
path: ${{ env.BUILDER_ROOT }}
|
||||
- name: Install sccache (only for non-forked PRs, and pushes to trunk)
|
||||
if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
|
||||
run: |
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
|
||||
- name: Populate binary env
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-pre-cxx11
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
libtorch-cpu-shared-with-deps-pre-cxx11-upload: # Uploading
|
||||
runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: libtorch-cpu-shared-with-deps-pre-cxx11-build
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: libtorch
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
LIBTORCH_VARIANT: shared-with-deps
|
||||
DESIRED_DEVTOOLSET: pre-cxx11
|
||||
# This is a dummy value for libtorch to work correctly with our batch scripts
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.7"
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
- name: Log in to ECR
|
||||
env:
|
||||
AWS_RETRY_MODE: standard
|
||||
AWS_MAX_ATTEMPTS: 5
|
||||
run: |
|
||||
AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
|
||||
--password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
|
||||
- name: Chown workspace
|
||||
run: |
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry docker pull "${ALPINE_IMAGE}"
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Clean workspace
|
||||
run: |
|
||||
rm -rf "${GITHUB_WORKSPACE}"
|
||||
mkdir "${GITHUB_WORKSPACE}"
|
||||
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
|
||||
uses: seemethere/add-github-ssh-key@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Preserve github env variables for use in docker
|
||||
run: |
|
||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-cpu-shared-with-deps-pre-cxx11
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Set DRY_RUN (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
|
||||
- name: Set UPLOAD_CHANNEL (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
# reference ends with an RC suffix
|
||||
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
|
||||
echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
|
||||
fi
|
||||
- name: Upload binaries
|
||||
env:
|
||||
PKG_DIR: "${{ runner.temp }}/artifacts"
|
||||
UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
|
||||
# When running these on pull_request events these should be blank
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
|
||||
ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
||||
run: |
|
||||
docker run --rm -i \
|
||||
-e ANACONDA_API_TOKEN \
|
||||
-e AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY \
|
||||
-e DRY_RUN \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PKG_DIR=/artifacts \
|
||||
-e UPLOAD_CHANNEL \
|
||||
-e UPLOAD_SUBFOLDER \
|
||||
-v "${RUNNER_TEMP}/artifacts:/artifacts" \
|
||||
-v "${GITHUB_WORKSPACE}:/v" \
|
||||
-w /v \
|
||||
308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
|
||||
bash -c '.circleci/scripts/binary_upload.sh'
|
||||
- name: Hold runner for 2 hours or until ssh sessions have drained
|
||||
# Always hold for active ssh sessions
|
||||
if: always()
|
||||
run: .github/scripts/wait_for_ssh_to_drain.sh
|
||||
- name: Chown workspace
|
||||
if: always()
|
||||
run: |
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Kill containers, clean up images
|
||||
if: always()
|
||||
run: |
|
||||
# ignore expansion of "docker ps -q" since it could be empty
|
||||
# shellcheck disable=SC2046
|
||||
docker stop $(docker ps -q) || true
|
||||
# Prune all of the docker images
|
||||
docker system prune -af
|
||||
libtorch-cpu-shared-without-deps-pre-cxx11-build:
|
||||
runs-on: macos-10.15
|
||||
# libtorch builds take a long time on github hosted runners
|
||||
timeout-minutes: 720
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: libtorch
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
LIBTORCH_VARIANT: shared-without-deps
|
||||
DESIRED_DEVTOOLSET: pre-cxx11
|
||||
# This is a dummy value for libtorch to work correctly with our batch scripts
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.7"
|
||||
# For sccache access (only on non-forked PRs)
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
# shellcheck disable=SC2129
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
|
||||
- name: Install conda and dependencies
|
||||
run: |
|
||||
# Install conda, setup-miniconda messes with the path that messes with the ruby stuff we do later on
|
||||
curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x "${RUNNER_TEMP}/conda.sh"
|
||||
/bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
|
||||
echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: ${{ env.PYTORCH_ROOT }}
|
||||
submodules: recursive
|
||||
- name: Clone pytorch/builder
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: pytorch/builder
|
||||
path: ${{ env.BUILDER_ROOT }}
|
||||
- name: Install sccache (only for non-forked PRs, and pushes to trunk)
|
||||
if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
|
||||
run: |
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
|
||||
- name: Populate binary env
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: libtorch-cpu-shared-without-deps-pre-cxx11
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
libtorch-cpu-shared-without-deps-pre-cxx11-upload: # Uploading
|
||||
runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: libtorch-cpu-shared-without-deps-pre-cxx11-build
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: libtorch
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
LIBTORCH_VARIANT: shared-without-deps
|
||||
DESIRED_DEVTOOLSET: pre-cxx11
|
||||
# This is a dummy value for libtorch to work correctly with our batch scripts
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.7"
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
- name: Log in to ECR
|
||||
env:
|
||||
AWS_RETRY_MODE: standard
|
||||
AWS_MAX_ATTEMPTS: 5
|
||||
run: |
|
||||
AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
|
||||
--password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
|
||||
- name: Chown workspace
|
||||
run: |
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry docker pull "${ALPINE_IMAGE}"
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Clean workspace
|
||||
run: |
|
||||
rm -rf "${GITHUB_WORKSPACE}"
|
||||
mkdir "${GITHUB_WORKSPACE}"
|
||||
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
|
||||
uses: seemethere/add-github-ssh-key@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Preserve github env variables for use in docker
|
||||
run: |
|
||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
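# Docker containers started later don't inherit the runner's environment, so
# the GITHUB_* variables are snapshotted to a file that a container can mount
# or source if it needs them.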
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-cpu-shared-without-deps-pre-cxx11
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Set DRY_RUN (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
|
||||
- name: Set UPLOAD_CHANNEL (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
# reference ends with an RC suffix
|
||||
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
|
||||
echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
|
||||
fi
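# Upload gating: DRY_RUN stays enabled except on tagged pushes, and
# release-candidate tags (-rcN) additionally route the upload to the "test"
# channel instead of the default one.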
|
||||
- name: Upload binaries
|
||||
env:
|
||||
PKG_DIR: "${{ runner.temp }}/artifacts"
|
||||
UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
|
||||
# When running these on pull_request events these should be blank
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
|
||||
ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
||||
run: |
|
||||
docker run --rm -i \
|
||||
-e ANACONDA_API_TOKEN \
|
||||
-e AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY \
|
||||
-e DRY_RUN \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PKG_DIR=/artifacts \
|
||||
-e UPLOAD_CHANNEL \
|
||||
-e UPLOAD_SUBFOLDER \
|
||||
-v "${RUNNER_TEMP}/artifacts:/artifacts" \
|
||||
-v "${GITHUB_WORKSPACE}:/v" \
|
||||
-w /v \
|
||||
308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
|
||||
bash -c '.circleci/scripts/binary_upload.sh'
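# The upload itself runs in a pinned miniconda3 container: credentials enter
# via -e flags and the downloaded artifacts are bind-mounted at /artifacts.
# Presumably binary_upload.sh picks the wheel (S3) or conda (Anaconda) path
# from PACKAGE_TYPE, since both sets of credentials are passed in.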
|
||||
- name: Hold runner for 2 hours or until ssh sessions have drained
|
||||
# Always hold for active ssh sessions
|
||||
if: always()
|
||||
run: .github/scripts/wait_for_ssh_to_drain.sh
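# Paired with the SSH step above: the runner stays alive while any debugging
# SSH session opened via add-github-ssh-key is still connected, up to the
# two-hour cap in the step name.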
|
||||
- name: Chown workspace
|
||||
if: always()
|
||||
run: |
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Kill containers, clean up images
|
||||
if: always()
|
||||
run: |
|
||||
# ignore expansion of "docker ps -q" since it could be empty
|
||||
# shellcheck disable=SC2046
|
||||
docker stop $(docker ps -q) || true
|
||||
# Prune all of the docker images
|
||||
docker system prune -af
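# Self-hosted runners are reused across jobs, so stale containers and images
# must be torn down here; prune -af removes all unused images, not just
# dangling ones.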
|
||||
libtorch-cpu-static-with-deps-pre-cxx11-build:
|
||||
runs-on: macos-10.15
|
||||
# libtorch builds take a long time on github hosted runners
|
||||
timeout-minutes: 720
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: libtorch
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
LIBTORCH_VARIANT: static-with-deps
|
||||
DESIRED_DEVTOOLSET: pre-cxx11
|
||||
# This is a dummy value for libtorch to work correctly with our batch scripts
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.7"
|
||||
# For sccache access (only on non-forked PRs)
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
# shellcheck disable=SC2129
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
|
||||
- name: Install conda and dependencies
|
||||
run: |
|
||||
# Install conda manually: setup-miniconda changes PATH in a way that breaks the Ruby steps we run later on
|
||||
curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x "${RUNNER_TEMP}/conda.sh"
|
||||
/bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
|
||||
echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: ${{ env.PYTORCH_ROOT }}
|
||||
submodules: recursive
|
||||
- name: Clone pytorch/builder
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: pytorch/builder
|
||||
path: ${{ env.BUILDER_ROOT }}
|
||||
- name: Install sccache (only for non-forked PRs, and pushes to trunk)
|
||||
if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
|
||||
run: |
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
|
||||
- name: Populate binary env
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: libtorch-cpu-static-with-deps-pre-cxx11
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
libtorch-cpu-static-with-deps-pre-cxx11-upload: # Uploading
|
||||
runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: libtorch-cpu-static-with-deps-pre-cxx11-build
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: libtorch
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
LIBTORCH_VARIANT: static-with-deps
|
||||
DESIRED_DEVTOOLSET: pre-cxx11
|
||||
# This is a dummy value for libtorch to work correctly with our batch scripts
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.7"
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
- name: Log in to ECR
|
||||
env:
|
||||
AWS_RETRY_MODE: standard
|
||||
AWS_MAX_ATTEMPTS: 5
|
||||
run: |
|
||||
AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
|
||||
--password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
|
||||
- name: Chown workspace
|
||||
run: |
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry docker pull "${ALPINE_IMAGE}"
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Clean workspace
|
||||
run: |
|
||||
rm -rf "${GITHUB_WORKSPACE}"
|
||||
mkdir "${GITHUB_WORKSPACE}"
|
||||
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
|
||||
uses: seemethere/add-github-ssh-key@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Preserve github env variables for use in docker
|
||||
run: |
|
||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-cpu-static-with-deps-pre-cxx11
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Set DRY_RUN (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
|
||||
- name: Set UPLOAD_CHANNEL (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
# reference ends with an RC suffix
|
||||
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
|
||||
echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
|
||||
fi
|
||||
- name: Upload binaries
|
||||
env:
|
||||
PKG_DIR: "${{ runner.temp }}/artifacts"
|
||||
UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
|
||||
# When running these on pull_request events these should be blank
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
|
||||
ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
||||
run: |
|
||||
docker run --rm -i \
|
||||
-e ANACONDA_API_TOKEN \
|
||||
-e AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY \
|
||||
-e DRY_RUN \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PKG_DIR=/artifacts \
|
||||
-e UPLOAD_CHANNEL \
|
||||
-e UPLOAD_SUBFOLDER \
|
||||
-v "${RUNNER_TEMP}/artifacts:/artifacts" \
|
||||
-v "${GITHUB_WORKSPACE}:/v" \
|
||||
-w /v \
|
||||
308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
|
||||
bash -c '.circleci/scripts/binary_upload.sh'
|
||||
- name: Hold runner for 2 hours or until ssh sessions have drained
|
||||
# Always hold for active ssh sessions
|
||||
if: always()
|
||||
run: .github/scripts/wait_for_ssh_to_drain.sh
|
||||
- name: Chown workspace
|
||||
if: always()
|
||||
run: |
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Kill containers, clean up images
|
||||
if: always()
|
||||
run: |
|
||||
# ignore expansion of "docker ps -q" since it could be empty
|
||||
# shellcheck disable=SC2046
|
||||
docker stop $(docker ps -q) || true
|
||||
# Prune all of the docker images
|
||||
docker system prune -af
|
||||
libtorch-cpu-static-without-deps-pre-cxx11-build:
|
||||
runs-on: macos-10.15
|
||||
# libtorch builds take a long time on github hosted runners
|
||||
timeout-minutes: 720
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: libtorch
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
LIBTORCH_VARIANT: static-without-deps
|
||||
DESIRED_DEVTOOLSET: pre-cxx11
|
||||
# This is a dummy value for libtorch to work correctly with our batch scripts
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.7"
|
||||
# For sccache access (only on non-forked PRs)
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
# shellcheck disable=SC2129
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
|
||||
- name: Install conda and dependencies
|
||||
run: |
|
||||
# Install conda manually: setup-miniconda changes PATH in a way that breaks the Ruby steps we run later on
|
||||
curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x "${RUNNER_TEMP}/conda.sh"
|
||||
/bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
|
||||
echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: ${{ env.PYTORCH_ROOT }}
|
||||
submodules: recursive
|
||||
- name: Clone pytorch/builder
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: pytorch/builder
|
||||
path: ${{ env.BUILDER_ROOT }}
|
||||
- name: Install sccache (only for non-forked PRs, and pushes to trunk)
|
||||
if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
|
||||
run: |
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
|
||||
- name: Populate binary env
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: libtorch-cpu-static-without-deps-pre-cxx11
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
libtorch-cpu-static-without-deps-pre-cxx11-upload: # Uploading
|
||||
runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: libtorch-cpu-static-without-deps-pre-cxx11-build
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: libtorch
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
LIBTORCH_VARIANT: static-without-deps
|
||||
DESIRED_DEVTOOLSET: pre-cxx11
|
||||
# This is a dummy value for libtorch to work correctly with our batch scripts
|
||||
# without this value pip does not get installed for some reason
|
||||
DESIRED_PYTHON: "3.7"
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
- name: Log in to ECR
|
||||
env:
|
||||
AWS_RETRY_MODE: standard
|
||||
AWS_MAX_ATTEMPTS: 5
|
||||
run: |
|
||||
AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
|
||||
--password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
|
||||
- name: Chown workspace
|
||||
run: |
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry docker pull "${ALPINE_IMAGE}"
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Clean workspace
|
||||
run: |
|
||||
rm -rf "${GITHUB_WORKSPACE}"
|
||||
mkdir "${GITHUB_WORKSPACE}"
|
||||
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
|
||||
uses: seemethere/add-github-ssh-key@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Preserve github env variables for use in docker
|
||||
run: |
|
||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: libtorch-cpu-static-without-deps-pre-cxx11
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Set DRY_RUN (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
|
||||
- name: Set UPLOAD_CHANNEL (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
# reference ends with an RC suffix
|
||||
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
|
||||
echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
|
||||
fi
|
||||
- name: Upload binaries
|
||||
env:
|
||||
PKG_DIR: "${{ runner.temp }}/artifacts"
|
||||
UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
|
||||
# When running these on pull_request events these should be blank
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
|
||||
ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
||||
run: |
|
||||
docker run --rm -i \
|
||||
-e ANACONDA_API_TOKEN \
|
||||
-e AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY \
|
||||
-e DRY_RUN \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PKG_DIR=/artifacts \
|
||||
-e UPLOAD_CHANNEL \
|
||||
-e UPLOAD_SUBFOLDER \
|
||||
-v "${RUNNER_TEMP}/artifacts:/artifacts" \
|
||||
-v "${GITHUB_WORKSPACE}:/v" \
|
||||
-w /v \
|
||||
308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
|
||||
bash -c '.circleci/scripts/binary_upload.sh'
|
||||
- name: Hold runner for 2 hours or until ssh sessions have drained
|
||||
# Always hold for active ssh sessions
|
||||
if: always()
|
||||
run: .github/scripts/wait_for_ssh_to_drain.sh
|
||||
- name: Chown workspace
|
||||
if: always()
|
||||
run: |
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Kill containers, clean up images
|
||||
if: always()
|
||||
run: |
|
||||
# ignore expansion of "docker ps -q" since it could be empty
|
||||
# shellcheck disable=SC2046
|
||||
docker stop $(docker ps -q) || true
|
||||
# Prune all of the docker images
|
||||
docker system prune -af
|
752
.github/workflows/generated-macos-binary-wheel.yml
generated
vendored
@ -1,752 +0,0 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/macos_binary_build_workflow.yml.j2
# Generation script: .github/scripts/generate_ci_workflows.py
name: macos-binary-wheel

on:
  # TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
  push:
    # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
    branches:
      - nightly
    tags:
      # NOTE: Binary build pipelines should only get triggered on release candidate builds
      # Release candidate tags look like: v1.11.0-rc1
      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
      - 'ciflow/binaries/*'
      - 'ciflow/binaries_wheel/*'
  workflow_dispatch:

env:
  # Needed for conda builds
  ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
  ANACONDA_USER: pytorch
  AWS_DEFAULT_REGION: us-east-1
  BUILD_ENVIRONMENT: macos-binary-wheel
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  IN_CI: 1
  IS_GHA: 1
  PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
  PR_NUMBER: ${{ github.event.pull_request.number }}
  SKIP_ALL_TESTS: 1
concurrency:
  group: macos-binary-wheel-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
  cancel-in-progress: true
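# The concurrency group above cancels any in-flight run for the same PR (or
# the same commit on push), so only the newest binary build per ref stays
# queued; workflow_dispatch runs hash to their own group and never cancel
# push-triggered runs.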
|
||||
|
||||
jobs:
|
||||
wheel-py3_7-cpu-build:
|
||||
runs-on: macos-10.15
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: wheel
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
DESIRED_PYTHON: "3.7"
|
||||
# For sccache access (only on non-forked PRs)
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
# shellcheck disable=SC2129
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
|
||||
- name: Install conda and dependencies
|
||||
run: |
|
||||
# Install conda manually: setup-miniconda changes PATH in a way that breaks the Ruby steps we run later on
|
||||
curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x "${RUNNER_TEMP}/conda.sh"
|
||||
/bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
|
||||
echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: ${{ env.PYTORCH_ROOT }}
|
||||
submodules: recursive
|
||||
- name: Clone pytorch/builder
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: pytorch/builder
|
||||
path: ${{ env.BUILDER_ROOT }}
|
||||
- name: Install sccache (only for non-forked PRs, and pushes to trunk)
|
||||
if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
|
||||
run: |
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
|
||||
- name: Populate binary env
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: wheel-py3_7-cpu
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
wheel-py3_7-cpu-upload: # Uploading
|
||||
runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: wheel-py3_7-cpu-build
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: wheel
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
DESIRED_PYTHON: "3.7"
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
- name: Log in to ECR
|
||||
env:
|
||||
AWS_RETRY_MODE: standard
|
||||
AWS_MAX_ATTEMPTS: 5
|
||||
run: |
|
||||
AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
|
||||
--password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
|
||||
- name: Chown workspace
|
||||
run: |
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry docker pull "${ALPINE_IMAGE}"
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Clean workspace
|
||||
run: |
|
||||
rm -rf "${GITHUB_WORKSPACE}"
|
||||
mkdir "${GITHUB_WORKSPACE}"
|
||||
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
|
||||
uses: seemethere/add-github-ssh-key@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Preserve github env variables for use in docker
|
||||
run: |
|
||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: wheel-py3_7-cpu
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Set DRY_RUN (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
|
||||
- name: Set UPLOAD_CHANNEL (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
# reference ends with an RC suffix
|
||||
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
|
||||
echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
|
||||
fi
|
||||
- name: Upload binaries
|
||||
env:
|
||||
PKG_DIR: "${{ runner.temp }}/artifacts"
|
||||
UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
|
||||
# When running these on pull_request events these should be blank
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
|
||||
ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
||||
run: |
|
||||
docker run --rm -i \
|
||||
-e ANACONDA_API_TOKEN \
|
||||
-e AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY \
|
||||
-e DRY_RUN \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PKG_DIR=/artifacts \
|
||||
-e UPLOAD_CHANNEL \
|
||||
-e UPLOAD_SUBFOLDER \
|
||||
-v "${RUNNER_TEMP}/artifacts:/artifacts" \
|
||||
-v "${GITHUB_WORKSPACE}:/v" \
|
||||
-w /v \
|
||||
308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
|
||||
bash -c '.circleci/scripts/binary_upload.sh'
|
||||
- name: Hold runner for 2 hours or until ssh sessions have drained
|
||||
# Always hold for active ssh sessions
|
||||
if: always()
|
||||
run: .github/scripts/wait_for_ssh_to_drain.sh
|
||||
- name: Chown workspace
|
||||
if: always()
|
||||
run: |
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Kill containers, clean up images
|
||||
if: always()
|
||||
run: |
|
||||
# ignore expansion of "docker ps -q" since it could be empty
|
||||
# shellcheck disable=SC2046
|
||||
docker stop $(docker ps -q) || true
|
||||
# Prune all of the docker images
|
||||
docker system prune -af
|
||||
wheel-py3_8-cpu-build:
|
||||
runs-on: macos-10.15
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: wheel
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
DESIRED_PYTHON: "3.8"
|
||||
# For sccache access (only on non-forked PRs)
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
# shellcheck disable=SC2129
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
|
||||
- name: Install conda and dependencies
|
||||
run: |
|
||||
# Install conda manually: setup-miniconda changes PATH in a way that breaks the Ruby steps we run later on
|
||||
curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x "${RUNNER_TEMP}/conda.sh"
|
||||
/bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
|
||||
echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: ${{ env.PYTORCH_ROOT }}
|
||||
submodules: recursive
|
||||
- name: Clone pytorch/builder
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: pytorch/builder
|
||||
path: ${{ env.BUILDER_ROOT }}
|
||||
- name: Install sccache (only for non-forked PRs, and pushes to trunk)
|
||||
if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
|
||||
run: |
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
|
||||
- name: Populate binary env
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: wheel-py3_8-cpu
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
wheel-py3_8-cpu-upload: # Uploading
|
||||
runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: wheel-py3_8-cpu-build
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: wheel
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
DESIRED_PYTHON: "3.8"
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
- name: Log in to ECR
|
||||
env:
|
||||
AWS_RETRY_MODE: standard
|
||||
AWS_MAX_ATTEMPTS: 5
|
||||
run: |
|
||||
AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
|
||||
--password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
|
||||
- name: Chown workspace
|
||||
run: |
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry docker pull "${ALPINE_IMAGE}"
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Clean workspace
|
||||
run: |
|
||||
rm -rf "${GITHUB_WORKSPACE}"
|
||||
mkdir "${GITHUB_WORKSPACE}"
|
||||
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
|
||||
uses: seemethere/add-github-ssh-key@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Preserve github env variables for use in docker
|
||||
run: |
|
||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: wheel-py3_8-cpu
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Set DRY_RUN (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
|
||||
- name: Set UPLOAD_CHANNEL (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
# reference ends with an RC suffix
|
||||
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
|
||||
echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
|
||||
fi
|
||||
- name: Upload binaries
|
||||
env:
|
||||
PKG_DIR: "${{ runner.temp }}/artifacts"
|
||||
UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
|
||||
# When running these on pull_request events these should be blank
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
|
||||
ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
||||
run: |
|
||||
docker run --rm -i \
|
||||
-e ANACONDA_API_TOKEN \
|
||||
-e AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY \
|
||||
-e DRY_RUN \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PKG_DIR=/artifacts \
|
||||
-e UPLOAD_CHANNEL \
|
||||
-e UPLOAD_SUBFOLDER \
|
||||
-v "${RUNNER_TEMP}/artifacts:/artifacts" \
|
||||
-v "${GITHUB_WORKSPACE}:/v" \
|
||||
-w /v \
|
||||
308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
|
||||
bash -c '.circleci/scripts/binary_upload.sh'
|
||||
- name: Hold runner for 2 hours or until ssh sessions have drained
|
||||
# Always hold for active ssh sessions
|
||||
if: always()
|
||||
run: .github/scripts/wait_for_ssh_to_drain.sh
|
||||
- name: Chown workspace
|
||||
if: always()
|
||||
run: |
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Kill containers, clean up images
|
||||
if: always()
|
||||
run: |
|
||||
# ignore expansion of "docker ps -q" since it could be empty
|
||||
# shellcheck disable=SC2046
|
||||
docker stop $(docker ps -q) || true
|
||||
# Prune all of the docker images
|
||||
docker system prune -af
|
||||
wheel-py3_9-cpu-build:
|
||||
runs-on: macos-10.15
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: wheel
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
DESIRED_PYTHON: "3.9"
|
||||
# For sccache access (only on non-forked PRs)
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
# shellcheck disable=SC2129
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
|
||||
- name: Install conda and dependencies
|
||||
run: |
|
||||
# Install conda manually: setup-miniconda changes PATH in a way that breaks the Ruby steps we run later on
|
||||
curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x "${RUNNER_TEMP}/conda.sh"
|
||||
/bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
|
||||
echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: ${{ env.PYTORCH_ROOT }}
|
||||
submodules: recursive
|
||||
- name: Clone pytorch/builder
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: pytorch/builder
|
||||
path: ${{ env.BUILDER_ROOT }}
|
||||
- name: Install sccache (only for non-forked PRs, and pushes to trunk)
|
||||
if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
|
||||
run: |
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
|
||||
- name: Populate binary env
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: wheel-py3_9-cpu
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
wheel-py3_9-cpu-upload: # Uploading
|
||||
runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: wheel-py3_9-cpu-build
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: wheel
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
DESIRED_PYTHON: "3.9"
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
- name: Log in to ECR
|
||||
env:
|
||||
AWS_RETRY_MODE: standard
|
||||
AWS_MAX_ATTEMPTS: 5
|
||||
run: |
|
||||
AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
|
||||
--password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
|
||||
- name: Chown workspace
|
||||
run: |
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry docker pull "${ALPINE_IMAGE}"
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Clean workspace
|
||||
run: |
|
||||
rm -rf "${GITHUB_WORKSPACE}"
|
||||
mkdir "${GITHUB_WORKSPACE}"
|
||||
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
|
||||
uses: seemethere/add-github-ssh-key@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Preserve github env variables for use in docker
|
||||
run: |
|
||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: wheel-py3_9-cpu
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Set DRY_RUN (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
|
||||
- name: Set UPLOAD_CHANNEL (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
# reference ends with an RC suffix
|
||||
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
|
||||
echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
|
||||
fi
|
||||
- name: Upload binaries
|
||||
env:
|
||||
PKG_DIR: "${{ runner.temp }}/artifacts"
|
||||
UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
|
||||
# When running these on pull_request events these should be blank
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
|
||||
ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
||||
run: |
|
||||
docker run --rm -i \
|
||||
-e ANACONDA_API_TOKEN \
|
||||
-e AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY \
|
||||
-e DRY_RUN \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PKG_DIR=/artifacts \
|
||||
-e UPLOAD_CHANNEL \
|
||||
-e UPLOAD_SUBFOLDER \
|
||||
-v "${RUNNER_TEMP}/artifacts:/artifacts" \
|
||||
-v "${GITHUB_WORKSPACE}:/v" \
|
||||
-w /v \
|
||||
308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
|
||||
bash -c '.circleci/scripts/binary_upload.sh'
|
||||
- name: Hold runner for 2 hours or until ssh sessions have drained
|
||||
# Always hold for active ssh sessions
|
||||
if: always()
|
||||
run: .github/scripts/wait_for_ssh_to_drain.sh
|
||||
- name: Chown workspace
|
||||
if: always()
|
||||
run: |
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Kill containers, clean up images
|
||||
if: always()
|
||||
run: |
|
||||
# ignore expansion of "docker ps -q" since it could be empty
|
||||
# shellcheck disable=SC2046
|
||||
docker stop $(docker ps -q) || true
|
||||
# Prune all of the docker images
|
||||
docker system prune -af
|
||||
wheel-py3_10-cpu-build:
|
||||
runs-on: macos-10.15
|
||||
timeout-minutes: 240
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: wheel
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
DESIRED_PYTHON: "3.10"
|
||||
# For sccache access (only on non-forked PRs)
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
|
||||
steps:
|
||||
# NOTE: These environment variables are put here so that they can be applied on every job equally
|
||||
# They are also here because setting them at a workflow level doesn't give us access to the
|
||||
# runner.temp variable, which we need.
|
||||
- name: Populate binary env
|
||||
shell: bash
|
||||
run: |
|
||||
# shellcheck disable=SC2129
|
||||
echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
|
||||
# shellcheck disable=SC2129
|
||||
echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
|
||||
- name: Install conda and dependencies
|
||||
run: |
|
||||
# Install conda manually: setup-miniconda changes PATH in a way that breaks the Ruby steps we run later on
|
||||
curl --retry 3 -o "${RUNNER_TEMP}/conda.sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x "${RUNNER_TEMP}/conda.sh"
|
||||
/bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
|
||||
echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: ${{ env.PYTORCH_ROOT }}
|
||||
submodules: recursive
|
||||
- name: Clone pytorch/builder
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: pytorch/builder
|
||||
path: ${{ env.BUILDER_ROOT }}
|
||||
- name: Install sccache (only for non-forked PRs, and pushes to trunk)
|
||||
if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
|
||||
run: |
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
|
||||
- name: Populate binary env
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
|
||||
- name: Build PyTorch binary
|
||||
run: |
|
||||
# shellcheck disable=SC1091
|
||||
source "${RUNNER_TEMP}/anaconda/bin/activate"
|
||||
"${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: always()
|
||||
with:
|
||||
name: wheel-py3_10-cpu
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
|
||||
wheel-py3_10-cpu-upload: # Uploading
|
||||
runs-on: linux.2xlarge # self hosted runner to download ec2 artifacts
|
||||
if: ${{ github.repository_owner == 'pytorch' }}
|
||||
needs: wheel-py3_10-cpu-build
|
||||
env:
|
||||
PYTORCH_ROOT: ${{ github.workspace }}/pytorch
|
||||
BUILDER_ROOT: ${{ github.workspace }}/builder
|
||||
PACKAGE_TYPE: wheel
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_CUDA: cpu
|
||||
DESIRED_PYTHON: "3.10"
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
- name: Log in to ECR
|
||||
env:
|
||||
AWS_RETRY_MODE: standard
|
||||
AWS_MAX_ATTEMPTS: 5
|
||||
run: |
|
||||
AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
|
||||
--password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
|
||||
- name: Chown workspace
|
||||
run: |
|
||||
retry () {
|
||||
"$@" || (sleep 1 && "$@") || (sleep 2 && "$@")
|
||||
}
|
||||
retry docker pull "${ALPINE_IMAGE}"
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --pull=never --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Clean workspace
|
||||
run: |
|
||||
rm -rf "${GITHUB_WORKSPACE}"
|
||||
mkdir "${GITHUB_WORKSPACE}"
|
||||
- name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
|
||||
uses: seemethere/add-github-ssh-key@v1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Preserve github env variables for use in docker
|
||||
run: |
|
||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
- name: Clone pytorch/pytorch
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
name: Download Build Artifacts
|
||||
with:
|
||||
name: wheel-py3_10-cpu
|
||||
path: "${{ runner.temp }}/artifacts/"
|
||||
- name: Set DRY_RUN (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
|
||||
- name: Set UPLOAD_CHANNEL (only for tagged pushes)
|
||||
if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/')}}
|
||||
run: |
|
||||
# reference ends with an RC suffix
|
||||
if [[ ${GITHUB_REF_NAME} = *-rc[0-9]* ]]; then
|
||||
echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
|
||||
fi
|
||||
- name: Upload binaries
|
||||
env:
|
||||
PKG_DIR: "${{ runner.temp }}/artifacts"
|
||||
UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
|
||||
# When running these on pull_request events these should be blank
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_SECRET_KEY }}
|
||||
ANACONDA_API_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
||||
run: |
|
||||
docker run --rm -i \
|
||||
-e ANACONDA_API_TOKEN \
|
||||
-e AWS_ACCESS_KEY_ID \
|
||||
-e AWS_SECRET_ACCESS_KEY \
|
||||
-e DRY_RUN \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PKG_DIR=/artifacts \
|
||||
-e UPLOAD_CHANNEL \
|
||||
-e UPLOAD_SUBFOLDER \
|
||||
-v "${RUNNER_TEMP}/artifacts:/artifacts" \
|
||||
-v "${GITHUB_WORKSPACE}:/v" \
|
||||
-w /v \
|
||||
308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/miniconda3:4.10.3 \
|
||||
bash -c '.circleci/scripts/binary_upload.sh'
|
||||
- name: Hold runner for 2 hours or until ssh sessions have drained
|
||||
# Always hold for active ssh sessions
|
||||
if: always()
|
||||
run: .github/scripts/wait_for_ssh_to_drain.sh
|
||||
- name: Chown workspace
|
||||
if: always()
|
||||
run: |
|
||||
# Ensure the working directory gets chowned back to the current user
|
||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
- name: Kill containers, clean up images
|
||||
if: always()
|
||||
run: |
|
||||
# ignore expansion of "docker ps -q" since it could be empty
|
||||
# shellcheck disable=SC2046
|
||||
docker stop $(docker ps -q) || true
|
||||
# Prune all of the docker images
|
||||
docker system prune -af
|