mirror of
https://github.com/pytorch/pytorch.git
synced 2025-11-03 23:45:05 +08:00
Compare commits
41 Commits
v1.13.0-rc
...
v1.11.0-rc
| Author | SHA1 | Date | |
|---|---|---|---|
| 46f85865c0 | |||
| a556333dfa | |||
| 3c14fe2151 | |||
| 055052bf64 | |||
| 68ef2a2188 | |||
| 9647fb7d18 | |||
| e4944871c8 | |||
| bbf2c0e3c6 | |||
| e6e8877bc2 | |||
| eaa80c6fd8 | |||
| 7fa092949e | |||
| 74cd18623e | |||
| dad4c2d032 | |||
| 565742cb63 | |||
| 7acb591cf9 | |||
| a5d5a6ad4f | |||
| ea5089751f | |||
| d216c83667 | |||
| f7e0ca546c | |||
| 89ee69e173 | |||
| e0aad8e864 | |||
| 28ad47f553 | |||
| 2cc3c2ef38 | |||
| b0037f707f | |||
| b6a3176c1c | |||
| 5fac320809 | |||
| 1f406fe91d | |||
| 6a46b2e2aa | |||
| 503a0923d3 | |||
| 6641e9b75f | |||
| 4f9f0e7a13 | |||
| 0ea924fc98 | |||
| 5a78725c29 | |||
| f72151b900 | |||
| 8380187819 | |||
| 7cc129e60c | |||
| ff6c348762 | |||
| 03a283b2b1 | |||
| 614e765575 | |||
| 7b0e140ecc | |||
| 3fab33e1c9 |
63
.azure_pipelines/build-pipeline.yml
Normal file
63
.azure_pipelines/build-pipeline.yml
Normal file
@ -0,0 +1,63 @@
|
||||
# PyTorch CI Builds Pipeline on Azure DevOps
|
||||
#
|
||||
# This pipeline:
|
||||
# 1) builds PyTorch on select configurations
|
||||
# 2) runs only TestTorch unit tests.
|
||||
|
||||
stages:
|
||||
- stage: 'Build'
|
||||
displayName: 'Build PyTorch'
|
||||
jobs:
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_CPU_docker
|
||||
pool: 'PyTorch-Linux-CPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
build_stage: True
|
||||
is_ci_build: True
|
||||
os: ubuntu
|
||||
cuda: cpu
|
||||
customMatrixes:
|
||||
Py_38:
|
||||
configuration: ubuntu_1804_py_38_cpu
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
|
||||
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_GPU_docker
|
||||
pool: 'PyTorch-Linux-GPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
build_stage: True
|
||||
is_ci_build: True
|
||||
os: ubuntu
|
||||
cuda: gpu
|
||||
customMatrixes:
|
||||
Py_39_CUDA_112_cuDNN_810:
|
||||
configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
|
||||
CUDA_VERSION: 112
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_CPU
|
||||
pool: 'PyTorch-Win-CPU'
|
||||
build_stage: True
|
||||
is_ci_build: True
|
||||
os: windows
|
||||
cuda: cpu
|
||||
customMatrixes:
|
||||
Py_37:
|
||||
configuration: windows_2019_py_37_cpu
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_GPU
|
||||
pool: 'PyTorch-Win-GPU'
|
||||
build_stage: True
|
||||
is_ci_build: True
|
||||
os: windows
|
||||
cuda: gpu
|
||||
customMatrixes:
|
||||
Py_38_CUDA_102_cuDNN_765:
|
||||
configuration: windows_2019_py_38_cuda_102_cudnn_765
|
||||
CUDA_VERSION: 102
|
||||
82
.azure_pipelines/daily-pipeline.yml
Normal file
82
.azure_pipelines/daily-pipeline.yml
Normal file
@ -0,0 +1,82 @@
|
||||
# PyTorch Daily Builds Pipeline on Azure DevOps
|
||||
#
|
||||
# This pipeline:
|
||||
# 1) builds PyTorch on all available configurations
|
||||
# 2) runs all PyTorch unit tests
|
||||
|
||||
stages:
|
||||
- stage: 'BuildTest'
|
||||
displayName: 'Build and Test PyTorch'
|
||||
jobs:
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_CPU_docker
|
||||
pool: 'PyTorch-Linux-CPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
build_stage: True
|
||||
is_daily_build: True
|
||||
os: ubuntu
|
||||
cuda: cpu
|
||||
customMatrixes:
|
||||
Py_38:
|
||||
configuration: ubuntu_1804_py_38_cpu
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
|
||||
Py_37:
|
||||
configuration: ubuntu_1804_py_37_cpu
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev
|
||||
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_GPU_docker
|
||||
pool: 'PyTorch-Linux-GPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
build_stage: True
|
||||
is_daily_build: True
|
||||
os: ubuntu
|
||||
cuda: gpu
|
||||
customMatrixes:
|
||||
Py_39_CUDA_112_cuDNN_810:
|
||||
configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
|
||||
CUDA_VERSION: 112
|
||||
Py_38_CUDA_102_cuDNN_810:
|
||||
configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
|
||||
CUDA_VERSION: 102
|
||||
Py_37_CUDA_101_cuDNN_765:
|
||||
configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
|
||||
CUDA_VERSION: 101
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_CPU
|
||||
pool: 'PyTorch-Win-CPU'
|
||||
build_stage: True
|
||||
is_daily_build: True
|
||||
os: windows
|
||||
cuda: cpu
|
||||
customMatrixes:
|
||||
Py_38:
|
||||
configuration: windows_2019_py_38_cpu
|
||||
Py_37:
|
||||
configuration: windows_2019_py_37_cpu
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_GPU
|
||||
pool: 'PyTorch-Win-GPU'
|
||||
build_stage: True
|
||||
is_daily_build: True
|
||||
os: windows
|
||||
cuda: gpu
|
||||
customMatrixes:
|
||||
Py_39_CUDA_112_cuDNN_810:
|
||||
configuration: windows_2019_py_39_cuda_112_cudnn_810
|
||||
CUDA_VERSION: 112
|
||||
Py_38_CUDA_102_cuDNN_765:
|
||||
configuration: windows_2019_py_38_cuda_102_cudnn_765
|
||||
CUDA_VERSION: 102
|
||||
Py_37_CUDA_101_cuDNN_764:
|
||||
configuration: windows_2019_py_37_cuda_101_cudnn_764
|
||||
CUDA_VERSION: 101
|
||||
@ -0,0 +1,134 @@
|
||||
# PyTorch build steps template with Unix images Azure DevOps Instances
|
||||
#
|
||||
# This build depends on 3 parameters set as environment variables in the pipeline:
|
||||
# - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
|
||||
# - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
|
||||
# - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts
|
||||
|
||||
parameters:
|
||||
name: ''
|
||||
pool: ''
|
||||
container_endpoint: ''
|
||||
os: ''
|
||||
cuda: ''
|
||||
is_ci_build: False
|
||||
is_official_build: False
|
||||
is_daily_build: False
|
||||
build_stage: False
|
||||
verify_stage: False
|
||||
publish_stage: False
|
||||
customMatrixes: ''
|
||||
|
||||
jobs:
|
||||
- job: ${{parameters.name}}
|
||||
timeoutInMinutes: 300
|
||||
strategy:
|
||||
matrix:
|
||||
${{ insert }}: ${{parameters.customMatrixes}}
|
||||
pool:
|
||||
name: ${{ parameters.pool}}
|
||||
variables:
|
||||
DECODE_PERCENTS: false
|
||||
container:
|
||||
image: $[variables['container_image']]
|
||||
endpoint: ${{parameters.container_endpoint}}
|
||||
|
||||
steps:
|
||||
# Build stage
|
||||
- ${{ if eq(parameters.build_stage, 'True') }}:
|
||||
# Set up environment variables for specific pipeline build
|
||||
- template: set-environment-variables.yml
|
||||
parameters:
|
||||
os: ${{ parameters.os}}
|
||||
cuda: ${{ parameters.cuda}}
|
||||
is_official_build: ${{ parameters.is_official_build}}
|
||||
|
||||
# Sync and update PyTorch submodules
|
||||
- bash: git submodule update --init --recursive --jobs 0
|
||||
displayName: Update PyTorch submodules
|
||||
|
||||
# Build PyTorch and run unit tests - no packaging
|
||||
- ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
|
||||
# Build PyTorch from source in develop mode
|
||||
- bash: python setup.py develop
|
||||
displayName: Build PyTorch from source
|
||||
|
||||
- ${{ if eq(parameters.is_ci_build, 'True') }}:
|
||||
# Run TestTorch unit tests to demonstrate successful PyTorch build
|
||||
- bash: python test/test_torch.py TestTorch
|
||||
displayName: Run TestTorch unit tests
|
||||
|
||||
- ${{ if eq(parameters.is_daily_build, 'True') }}:
|
||||
# Run all unit tests to demonstrate successful PyTorch build
|
||||
- bash: python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
|
||||
displayName: Run all unit tests
|
||||
|
||||
# Run ComponentGovernance
|
||||
- task: ComponentGovernanceComponentDetection@0
|
||||
inputs:
|
||||
scanType: 'Register'
|
||||
verbosity: 'Verbose'
|
||||
alertWarningLevel: 'High'
|
||||
|
||||
# Build PyTorch and produce artifacts for verification stage
|
||||
- ${{ if eq(parameters.is_official_build, 'True') }}:
|
||||
# Build PyTorch from source in install mode and exclude test binaries
|
||||
- bash: python setup.py install
|
||||
displayName: Build PyTorch from source without test binaries
|
||||
|
||||
# Package PyTorch Wheel
|
||||
- bash: python setup.py bdist_wheel
|
||||
displayName: Package PyTorch Wheel
|
||||
|
||||
# Publish PyTorch Wheel
|
||||
- task: PublishPipelineArtifact@1
|
||||
inputs:
|
||||
targetPath: $(Build.SourcesDirectory)/dist/
|
||||
artifactName: Build_$(Build.BuildNumber)_$(configuration)
|
||||
displayName: Publish PyTorch Wheel to Pipeline Artifacts
|
||||
|
||||
# Verification stage
|
||||
- ${{ if eq(parameters.verify_stage, 'True') }}:
|
||||
# Download PyTorch Wheel
|
||||
- task: DownloadPipelineArtifact@2
|
||||
inputs:
|
||||
artifact: Build_$(Build.BuildNumber)_$(configuration)
|
||||
path: $(Build.SourcesDirectory)/verify
|
||||
displayName: Download PyTorch Wheel
|
||||
|
||||
# Install PyTorch Wheel on Windows
|
||||
- bash: python -m pip install $(Build.SourcesDirectory)/verify/torch*linux*.whl
|
||||
displayName: Install PyTorch Wheel
|
||||
|
||||
# Ensure PyTorch installed correctly from produced wheel
|
||||
- bash: |
|
||||
cd $(Build.SourcesDirectory)/verify
|
||||
python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
|
||||
displayName: Check PyTorch correctly installed from wheel
|
||||
|
||||
# Publishing stage
|
||||
- ${{ if eq(parameters.publish_stage, 'True') }}:
|
||||
# Download PyTorch Wheel
|
||||
- task: DownloadPipelineArtifact@2
|
||||
inputs:
|
||||
artifact: Build_$(Build.BuildNumber)_$(configuration)
|
||||
path: $(Build.SourcesDirectory)/publish
|
||||
displayName: Download PyTorch Wheel
|
||||
|
||||
# Publish wheel to Azure Artifacts
|
||||
# The flag continueOnError=true is needed as the artifact to be published
|
||||
# may already exist, because the artifact is differentiated based on the
|
||||
# last commit date.
|
||||
- bash: |
|
||||
export TORCH_VERSION=$(head -c 5 ./version.txt)
|
||||
export LAST_COMMIT=$(git rev-parse --short HEAD)
|
||||
export LAST_COMMIT_DATE=$(git log -1 --pretty=%ad --date=format:%Y%m%d)
|
||||
cd $(Build.SourcesDirectory)/publish
|
||||
export TORCH_WHEEL=$(echo torch*linux*whl)
|
||||
az extension add -n azure-devops
|
||||
echo $ADOTOKEN | az devops login
|
||||
az artifacts universal publish --organization $AZURE_DEVOPS_ARTIFACTS_ORGANIZATION --project $AZURE_DEVOPS_ARTIFACTS_PROJECT --scope project --feed "PyTorch" --name $TORCH_WHEEL --description "PyTorch Official Build Artifact" --version $TORCH_VERSION-$LAST_COMMIT_DATE-$LAST_COMMIT --path .
|
||||
env:
|
||||
ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
|
||||
continueOnError: true
|
||||
displayName: Upload PyTorch Official Build package to Azure Artifacts
|
||||
@ -0,0 +1,150 @@
|
||||
# PyTorch build steps template with Windows images Azure DevOps Instances
|
||||
#
|
||||
# This build depends on 3 parameters set as environment variables in the pipeline:
|
||||
# - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
|
||||
# - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
|
||||
# - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts
|
||||
|
||||
parameters:
|
||||
name: ''
|
||||
pool: ''
|
||||
os: ''
|
||||
cuda: ''
|
||||
is_ci_build: False
|
||||
is_official_build: False
|
||||
is_daily_build: False
|
||||
build_stage: False
|
||||
verify_stage: False
|
||||
publish_stage: False
|
||||
customMatrixes: ''
|
||||
|
||||
jobs:
|
||||
- job: ${{parameters.name}}
|
||||
timeoutInMinutes: 300
|
||||
strategy:
|
||||
matrix:
|
||||
${{ insert }}: ${{parameters.customMatrixes}}
|
||||
pool:
|
||||
name: ${{ parameters.pool}}
|
||||
variables:
|
||||
CMAKE_GENERATOR: Ninja
|
||||
PACKAGE_PDBS: 0
|
||||
|
||||
steps:
|
||||
# Prepare for PyTorch build on Windows
|
||||
- template: prepare-build-template.yml
|
||||
parameters:
|
||||
configuration: $(configuration)
|
||||
build_stage: ${{ parameters.build_stage}}
|
||||
|
||||
# Build Stage
|
||||
- ${{ if eq(parameters.build_stage, 'True') }}:
|
||||
# Set up environment variables for specific pipeline build
|
||||
- template: set-environment-variables.yml
|
||||
parameters:
|
||||
os: ${{ parameters.os}}
|
||||
cuda: ${{ parameters.cuda}}
|
||||
is_official_build: ${{ parameters.is_official_build}}
|
||||
|
||||
# Sync and update PyTorch submodules
|
||||
- script: git submodule update --init --recursive --jobs 0
|
||||
displayName: Update PyTorch submodules
|
||||
|
||||
# Build PyTorch and run unit tests - no packaging
|
||||
- ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
|
||||
# Build PyTorch from source in develop mode with Ninja
|
||||
- script: call activate $(configuration) && python setup.py develop
|
||||
displayName: Build PyTorch from source
|
||||
|
||||
- ${{ if eq(parameters.is_ci_build, 'True') }}:
|
||||
# Run TestTorch unit tests to demonstrate successful PyTorch build
|
||||
- script: call activate $(configuration) && python test\test_torch.py TestTorch
|
||||
displayName: Run TestTorch unit tests
|
||||
|
||||
- ${{ if eq(parameters.is_daily_build, 'True') }}:
|
||||
# Run all unit tests to demonstrate successful PyTorch build
|
||||
- script: call activate $(configuration) && python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
|
||||
displayName: Run all unit tests
|
||||
|
||||
# Run ComponentGovernance
|
||||
- task: ComponentGovernanceComponentDetection@0
|
||||
inputs:
|
||||
scanType: 'Register'
|
||||
verbosity: 'Verbose'
|
||||
alertWarningLevel: 'High'
|
||||
|
||||
# Build PyTorch and produce artifacts for verification stage
|
||||
- ${{ if eq(parameters.is_official_build, 'True') }}:
|
||||
# Build PyTorch from source in install mode with Ninja and exclude test binaries
|
||||
- script: call activate $(configuration) && python setup.py install
|
||||
displayName: Build PyTorch from source without test binaries
|
||||
|
||||
# Package PyTorch Wheel
|
||||
- script: call activate $(configuration) && python setup.py bdist_wheel
|
||||
displayName: Package PyTorch Wheel
|
||||
|
||||
# Publish PyTorch Wheel
|
||||
- task: PublishPipelineArtifact@1
|
||||
inputs:
|
||||
targetPath: $(Build.SourcesDirectory)\dist\
|
||||
artifactName: Build_$(Build.BuildNumber)_$(configuration)
|
||||
displayName: Publish PyTorch Wheel to Pipeline Artifacts
|
||||
|
||||
# Verification Stage
|
||||
- ${{ if eq(parameters.verify_stage, 'True') }}:
|
||||
# Download PyTorch Wheel
|
||||
- task: DownloadPipelineArtifact@2
|
||||
inputs:
|
||||
artifact: Build_$(Build.BuildNumber)_$(configuration)
|
||||
path: $(Build.SourcesDirectory)\verify
|
||||
displayName: Download PyTorch Wheel
|
||||
|
||||
# Install PyTorch Wheel on Windows
|
||||
- script: |
|
||||
call activate $(configuration)
|
||||
cd $(Build.SourcesDirectory)\verify
|
||||
dir torch*win*.whl /b > whl.txt
|
||||
set /p whl= < whl.txt
|
||||
python -m pip install %whl%
|
||||
displayName: Install PyTorch Wheel
|
||||
|
||||
# Ensure PyTorch installed correctly from produced wheel
|
||||
- script: |
|
||||
call activate $(configuration)
|
||||
cd $(Build.SourcesDirectory)\verify
|
||||
python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
|
||||
displayName: Check PyTorch correctly installed from wheel
|
||||
|
||||
# Publishing stage
|
||||
- ${{ if eq(parameters.publish_stage, 'True') }}:
|
||||
# Download PyTorch Wheel
|
||||
- task: DownloadPipelineArtifact@2
|
||||
inputs:
|
||||
artifact: Build_$(Build.BuildNumber)_$(configuration)
|
||||
path: $(Build.SourcesDirectory)\publish
|
||||
displayName: Download PyTorch Wheel
|
||||
|
||||
# Set up Azure Artifacts for Windows
|
||||
# The pip install --upgrade command is a bug fix for Azure CLI on Windows
|
||||
# More info: https://github.com/Azure/azure-cli/issues/16858
|
||||
- script: |
|
||||
pip install --upgrade pip --target \opt\az\lib\python3.6\site-packages\
|
||||
az extension add -n azure-devops
|
||||
displayName: Set up Azure Artifacts download on Windows
|
||||
|
||||
# Publish wheel to Azure Artifacts
|
||||
# The flag continueOnError=true is needed as the artifact to be published
|
||||
# may already exist, because the artifact is differentiated based on the
|
||||
# last commit date.
|
||||
- script: |
|
||||
set /p TORCH_VERSION= < version.txt
|
||||
cd $(Build.SourcesDirectory)\publish
|
||||
git rev-parse --short HEAD > last_commit.txt && set /p LAST_COMMIT= < last_commit.txt
|
||||
git log -1 --pretty=%ad --date=format:%Y%m%d > last_commit_date.txt && set /p LAST_COMMIT_DATE= < last_commit_date.txt
|
||||
dir torch*win*.whl /b > whl.txt && set /p TORCH_WHEEL= < whl.txt
|
||||
echo %ADOTOKEN% | az devops login
|
||||
az artifacts universal publish --organization %AZURE_DEVOPS_ARTIFACTS_ORGANIZATION% --project %AZURE_DEVOPS_ARTIFACTS_PROJECT% --scope project --feed "PyTorch" --name %TORCH_WHEEL% --description "PyTorch Official Build Artifact" --version %TORCH_VERSION:~0,5%-%LAST_COMMIT_DATE%-%LAST_COMMIT% --path .
|
||||
env:
|
||||
ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
|
||||
continueOnError: true
|
||||
displayName: Upload PyTorch nigthly package to Azure Artifacts
|
||||
17
.azure_pipelines/job_templates/common-packages.yml
Normal file
17
.azure_pipelines/job_templates/common-packages.yml
Normal file
@ -0,0 +1,17 @@
|
||||
dependencies:
|
||||
- python=PYTHON_VERSION
|
||||
- numpy
|
||||
- ninja
|
||||
- pyyaml
|
||||
- mkl
|
||||
- mkl-include
|
||||
- setuptools
|
||||
- cmake
|
||||
- cffi
|
||||
- typing_extensions
|
||||
- future
|
||||
- six
|
||||
- requests
|
||||
- dataclasses
|
||||
- pip:
|
||||
- -r ../../requirements.txt
|
||||
26
.azure_pipelines/job_templates/notify-webapp-template.yml
Normal file
26
.azure_pipelines/job_templates/notify-webapp-template.yml
Normal file
@ -0,0 +1,26 @@
|
||||
parameters:
|
||||
name: ''
|
||||
pool: ''
|
||||
customMatrixes: ''
|
||||
|
||||
jobs:
|
||||
- job: ${{parameters.name}}
|
||||
timeoutInMinutes: 600
|
||||
strategy:
|
||||
matrix:
|
||||
${{ insert }}: ${{parameters.customMatrixes}}
|
||||
pool:
|
||||
name: ${{ parameters.pool}}
|
||||
steps:
|
||||
# Clone PyTorch Tests repository
|
||||
- bash: |
|
||||
B64_PAT=$(echo -n ":$_ADOTOKEN" | base64)
|
||||
git -c http.extraHeader="Authorization: Basic ${B64_PAT}" clone $(AZURE_DEVOPS_PYTORCH_TESTS_REPO_URL)
|
||||
cd pytorch_tests
|
||||
git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
|
||||
env:
|
||||
_ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
|
||||
displayName: Clone PyTorch Tests repo
|
||||
- bash: |
|
||||
bash $(Build.SourcesDirectory)/pytorch_tests/webapp/notify_webapp.sh
|
||||
displayName: Notify Webapp
|
||||
62
.azure_pipelines/job_templates/prepare-build-template.yml
Normal file
62
.azure_pipelines/job_templates/prepare-build-template.yml
Normal file
@ -0,0 +1,62 @@
|
||||
# Build prepare steps for PyTorch on Azure DevOps to build from source.
|
||||
# These steps share between normal build process and semmle security scan tasks
|
||||
|
||||
parameters:
|
||||
build_stage: False
|
||||
configuration: ''
|
||||
|
||||
steps:
|
||||
# End Python tasks that may be lingering over from previous runs
|
||||
# Note: If python.exe isn't currently running, exit code becomes 128,
|
||||
# which fails the run. Here exit code is set to 0 to avoid failed run.
|
||||
- script: |
|
||||
taskkill /f /im python.exe
|
||||
IF %ERRORLEVEL% EQU 128 exit 0
|
||||
displayName: End previous Python processes
|
||||
|
||||
# Clean up env directory in conda for fresh builds and set up conda environment YAML
|
||||
- powershell: |
|
||||
Remove-Item 'C:\Miniconda\envs' -Recurse -ErrorAction Ignore
|
||||
$env:PYTHON_VERSION = $env:SYSTEM_JOBNAME.Substring(3,1) + '.' + $env:SYSTEM_JOBNAME.Substring(4,1)
|
||||
(Get-Content .azure_pipelines\job_templates\common-packages.yml) -replace 'PYTHON_VERSION', $env:PYTHON_VERSION | Out-File -encoding ASCII .azure_pipelines\job_templates\common-packages.yml
|
||||
displayName: Clean up previous environments and Set up conda environment YAML
|
||||
|
||||
# Make conda environment and install required packages
|
||||
- script: |
|
||||
call conda clean --all -y
|
||||
call conda env create -n $(configuration) --file .azure_pipelines\job_templates\common-packages.yml
|
||||
call activate $(configuration)
|
||||
call conda install -c conda-forge libuv=1.39
|
||||
displayName: Set up conda environment for building from source
|
||||
|
||||
- ${{ if eq(parameters.build_stage, 'True') }}:
|
||||
# Install MKL
|
||||
- script: |
|
||||
rmdir /s /q mkl
|
||||
del mkl_2020.2.254.7z
|
||||
curl https://s3.amazonaws.com/ossci-windows/mkl_2020.2.254.7z -k -O
|
||||
7z x -aoa mkl_2020.2.254.7z -omkl
|
||||
displayName: Install MKL
|
||||
|
||||
# Install sccache and randomtemp
|
||||
# Related PyTorch GitHub issue: https://github.com/pytorch/pytorch/issues/25393
|
||||
# Related fix: https://github.com/pytorch/builder/pull/448/
|
||||
- script: |
|
||||
mkdir .\tmp_bin
|
||||
curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output .\tmp_bin\sccache.exe
|
||||
curl -k https://s3.amazonaws.com/ossci-windows/sccache-cl.exe --output .\tmp_bin\sccache-cl.exe
|
||||
copy .\tmp_bin\sccache.exe .\tmp_bin\nvcc.exe
|
||||
curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.4/randomtemp.exe --output .\tmp_bin\randomtemp.exe
|
||||
displayName: Install sccache and randomtemp
|
||||
condition: not(eq(variables.CUDA_VERSION, ''))
|
||||
|
||||
# CUDA 11.2's CUB directory conflicts with CUDA 10.2 and 10.1
|
||||
# builds, where CUDA 11.2's CUB is injected into non-CUDA
|
||||
# 11.2 builds.
|
||||
- powershell: Remove-Item "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include\cub" -Recurse -ErrorAction Ignore
|
||||
displayName: Remove conflicting CUB from CUDA installation
|
||||
condition: not(eq(variables.CUDA_VERSION, ''))
|
||||
|
||||
- powershell: Copy-Item -Path "F:\cuda_11_2\cub\" -Destination "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include" -Recurse
|
||||
displayName: Copy CUDA CUB for CUDA 11.2 build
|
||||
condition: eq(variables.CUDA_VERSION, '112')
|
||||
61
.azure_pipelines/job_templates/pytorch-template-unix.yml
Normal file
61
.azure_pipelines/job_templates/pytorch-template-unix.yml
Normal file
@ -0,0 +1,61 @@
|
||||
# PyTorch build steps template with Unix images Azure DevOps Instances
|
||||
#
|
||||
# This build depends on 5 parameters set as an environment variables in the pipeline:
|
||||
# - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
|
||||
# - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
|
||||
# - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests
|
||||
|
||||
parameters:
|
||||
name: ''
|
||||
pool: ''
|
||||
container_endpoint: ''
|
||||
customMatrixes: ''
|
||||
|
||||
jobs:
|
||||
- job: ${{parameters.name}}
|
||||
timeoutInMinutes: 600
|
||||
strategy:
|
||||
matrix:
|
||||
${{ insert }}: ${{parameters.customMatrixes}}
|
||||
pool:
|
||||
name: ${{ parameters.pool}}
|
||||
variables:
|
||||
DECODE_PERCENTS: false
|
||||
|
||||
steps:
|
||||
# Don't checkout repo contents to save time and CPU compute. Environment variables
|
||||
# related to checkout branch such as $(BUILD_SOURCEBRANCH) are still available.
|
||||
- checkout: none
|
||||
|
||||
# Delete pytorch_tests repo from previous builds if exists
|
||||
- bash: rm -rf pytorch_tests/
|
||||
displayName: Delete pytorch_tests repo from previous builds if exists
|
||||
|
||||
# Clone PyTorch Tests repository
|
||||
- bash: |
|
||||
B64_PAT=$(echo -n ":$_ADOTOKEN" | base64)
|
||||
git -c http.extraHeader="Authorization: Basic ${B64_PAT}" clone $(AZURE_DEVOPS_PYTORCH_TESTS_REPO_URL)
|
||||
cd pytorch_tests
|
||||
git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
|
||||
env:
|
||||
_ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
|
||||
displayName: Clone PyTorch Tests repo
|
||||
|
||||
# Run PyTorch Unit Tests
|
||||
- bash: bash $(Build.SourcesDirectory)/pytorch_tests/scripts/linux/run.sh
|
||||
env:
|
||||
_AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
|
||||
_TS_CLONE_P: $(TS_CLONE_PASSWORD)
|
||||
_TS_P: $(TS_PAT)
|
||||
_TS_SM_P: $(TS_SM_PAT)
|
||||
_AZUREML_CLONE_PASSWORD: $(AZUREML_CLONE_PASSWORD)
|
||||
_SPPASSWORD: $(SPPASSWORD)
|
||||
displayName: Run PyTorch Unit Tests
|
||||
|
||||
# Tests results are available outside the docker container since
|
||||
# the current directory is mounted as a volume of the container.
|
||||
- task: PublishTestResults@2
|
||||
condition: always()
|
||||
inputs:
|
||||
testResultsFiles: '**/test-*.xml'
|
||||
testRunTitle: 'Publish test results for Python'
|
||||
57
.azure_pipelines/job_templates/pytorch-template-win.yml
Normal file
57
.azure_pipelines/job_templates/pytorch-template-win.yml
Normal file
@ -0,0 +1,57 @@
|
||||
# PyTorch build steps template with Windows images Azure DevOps Instances
|
||||
#
|
||||
# This build depends on 5 parameters set as an environment variables in the pipeline:
|
||||
# - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
|
||||
# - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
|
||||
# - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests
|
||||
|
||||
parameters:
|
||||
name: ''
|
||||
pool: ''
|
||||
customMatrixes: ''
|
||||
|
||||
jobs:
|
||||
- job: ${{parameters.name}}
|
||||
timeoutInMinutes: 600
|
||||
strategy:
|
||||
matrix:
|
||||
${{ insert }}: ${{parameters.customMatrixes}}
|
||||
pool:
|
||||
name: ${{ parameters.pool}}
|
||||
|
||||
steps:
|
||||
# Don't checkout repo contents to save time and CPU compute. Environment variables
|
||||
# related to checkout branch such as $(BUILD_SOURCEBRANCH) are still available.
|
||||
- checkout: none
|
||||
|
||||
# Delete pytorch_tests repo from previous builds if exists
|
||||
- script: if exist "pytorch_tests/" rmdir "pytorch_tests/" /q /s
|
||||
displayName: Delete pytorch_tests repo from previous builds if exists
|
||||
|
||||
# Clone PyTorch Tests repository
|
||||
- powershell: |
|
||||
$env:B64Pat = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes(":$env:_ADOTOKEN"))
|
||||
git -c http.extraHeader="Authorization: Basic $env:B64Pat" clone $env:AZURE_DEVOPS_pytorch_tests_REPO_URL
|
||||
cd pytorch_tests
|
||||
git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
|
||||
env:
|
||||
_ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
|
||||
displayName: Clone PyTorch Tests repo
|
||||
|
||||
# Run PyTorch Unit Tests
|
||||
- script: call $(Build.SourcesDirectory)\pytorch_tests\scripts\windows\run.bat
|
||||
env:
|
||||
_ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
|
||||
_AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
|
||||
_TS_CLONE_P: $(TS_CLONE_PASSWORD)
|
||||
_TS_P: $(TS_PAT)
|
||||
_TS_SM_P: $(TS_SM_PAT)
|
||||
displayName: Run PyTorch Unit Tests
|
||||
|
||||
# Tests results are available outside the docker container since
|
||||
# the current directory is mounted as a volume of the container.
|
||||
- task: PublishTestResults@2
|
||||
condition: always()
|
||||
inputs:
|
||||
testResultsFiles: '**\test-*.xml'
|
||||
testRunTitle: 'Publish test results for Python'
|
||||
129
.azure_pipelines/job_templates/set-environment-variables.yml
Normal file
129
.azure_pipelines/job_templates/set-environment-variables.yml
Normal file
@ -0,0 +1,129 @@
|
||||
# Set environment variables for specific configurations
|
||||
|
||||
parameters:
|
||||
is_official_build: False
|
||||
os: ''
|
||||
cuda: ''
|
||||
|
||||
steps:
|
||||
# Environment configuration steps for Ubuntu builds
|
||||
- ${{ if contains(parameters.os, 'ubuntu') }}:
|
||||
# Set configuration specific build flags
|
||||
- ${{ if eq(parameters.is_official_build, True) }}:
|
||||
- bash: |
|
||||
echo "##vso[task.setvariable variable=INSTALL_TEST;]0"
|
||||
echo "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
|
||||
export PYTORCH_VERSION=$(head -c 5 ./version.txt)
|
||||
echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
|
||||
displayName: Set configuration-specific build flags
|
||||
|
||||
# Set PyTorch CPU/GPU build flags.
|
||||
- ${{ if contains(parameters.cuda, 'cpu') }}:
|
||||
- bash: |
|
||||
echo "##vso[task.setvariable variable=USE_CUDA;]0"
|
||||
echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
|
||||
displayName: Set CUDA-specific build flag for CPU builds
|
||||
|
||||
- ${{ if contains(parameters.cuda, 'gpu') }}:
|
||||
- bash: |
|
||||
echo "##vso[task.setvariable variable=USE_CUDA;]1"
|
||||
echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
|
||||
displayName: Set CUDA-specific build flag for GPU builds
|
||||
|
||||
# Set MKL environment variables
|
||||
- bash: |
|
||||
echo "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]/opt/intel/lib:$CMAKE_LIBRARY_PATH"
|
||||
echo "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]/opt/intel/include:$CMAKE_INCLUDE_PATH"
|
||||
displayName: Set MKL paths
|
||||
|
||||
# View current environment variables
|
||||
- bash:
|
||||
printenv
|
||||
displayName: Show environment variables
|
||||
|
||||
# Environment configuration steps for Windows builds
|
||||
- ${{ if contains(parameters.os, 'windows') }}:
|
||||
# Set Conda Lib Path
|
||||
- powershell: Write-Host "##vso[task.setvariable variable=CONDA_LIB_PATH;]C:\Miniconda\envs\$(configuration)\Library\bin"
|
||||
displayName: Set Conda Lib Path
|
||||
|
||||
# Set configuration specific build flags
|
||||
- ${{ if eq(parameters.is_official_build, True) }}:
|
||||
- powershell: |
|
||||
Write-Host "##vso[task.setvariable variable=INSTALL_TEST;]0"
|
||||
Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
|
||||
Set-Variable -Name PYTORCH_VERSION -Value (Get-Content .\version.txt).Substring(0,5)
|
||||
Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
|
||||
displayName: Set configuration-specific build flags
|
||||
|
||||
# Set PyTorch CPU/GPU build flags..
|
||||
- ${{ if contains(parameters.cuda, 'cpu') }}:
|
||||
- powershell: |
|
||||
Write-Host "##vso[task.setvariable variable=USE_CUDA;]0"
|
||||
Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
|
||||
displayName: Set CUDA-specific build flag for CPU build
|
||||
|
||||
- ${{ if contains(parameters.cuda, 'gpu') }}:
|
||||
- powershell: |
|
||||
Write-Host "##vso[task.setvariable variable=USE_CUDA;]1"
|
||||
Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
|
||||
displayName: Set CUDA-specific build flag for GPU build
|
||||
|
||||
# Set CUDA 11.2, 10.2 or 10.1 specific build flags
|
||||
- ${{ if eq(parameters.cuda, 'gpu') }}:
|
||||
- powershell: |
|
||||
Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5;8.0;8.6"
|
||||
Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\"
|
||||
displayName: Set CUDA 11.2 specific build flags
|
||||
condition: eq(variables.CUDA_VERSION, '112')
|
||||
|
||||
- powershell: |
|
||||
Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
|
||||
Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\"
|
||||
displayName: Set CUDA 10.2 specific build flags
|
||||
condition: eq(variables.CUDA_VERSION, '102')
|
||||
|
||||
- powershell: |
|
||||
Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
|
||||
Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\"
|
||||
displayName: Set CUDA 10.1 specific build flags
|
||||
condition: eq(variables.CUDA_VERSION, '101')
|
||||
|
||||
- powershell: |
|
||||
Write-Host "##vso[task.setvariable variable=CUDA_BIN_PATH;]$env:CUDA_PATH\bin\"
|
||||
Write-Host "##vso[task.setvariable variable=CUDNN_ROOT;]$env:CUDA_PATH"
|
||||
Write-Host "##vso[task.setvariable variable=CUDNN_INCLUDE_DIR;]$env:CUDA_PATH\include\"
|
||||
Write-Host "##vso[task.setvariable variable=CUDNN_LIBRARY;]$env:CUDA_PATH\lib\x64\"
|
||||
Write-Host "##vso[task.prependpath]$env:CUDA_PATH\bin"
|
||||
Write-Host "##vso[task.setvariable variable=TORCH_NVCC_FLAGS;]-Xfatbin -compress-all --no-host-device-move-forward"
|
||||
Write-Host "##vso[task.setvariable variable=THRUST_IGNORE_CUB_VERSION_CHECK;]1"
|
||||
Write-Host "##vso[task.setvariable variable=NVTOOLSEXT_PATH;]C:\Program Files\NVIDIA Corporation\NvToolsExt\"
|
||||
displayName: Set CUDA environment variables
|
||||
|
||||
- powershell: |
|
||||
copy "$(CUDA_BIN_PATH)\cusparse*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "$(CUDA_BIN_PATH)\cublas*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "$(CUDA_BIN_PATH)\cudart*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "$(CUDA_BIN_PATH)\curand*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "$(CUDA_BIN_PATH)\cufft*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "$(CUDA_BIN_PATH)\cusolver*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "$(CUDA_BIN_PATH)\cudnn*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "$(CUDA_BIN_PATH)\nvrtc*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64\nvToolsExt64_1.dll*" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "$(CONDA_LIB_PATH)\libiomp*5md.dll" $(Build.SourcesDirectory)\torch\lib
|
||||
copy "$(CONDA_LIB_PATH)\uv.dll" $(Build.SourcesDirectory)\torch\lib
|
||||
displayName: Copy CUDA/cuDNN/libomp/libuv dlls to torch\lib
|
||||
|
||||
# Set MKL, sccache and randomtemp environment variables
|
||||
- powershell: |
|
||||
Write-Host "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]$(Build.SourcesDirectory)\mkl\include"
|
||||
Write-Host "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]$(Build.SourcesDirectory)\mkl\lib;$env:CMAKE_LIBRARY_PATH"
|
||||
Write-Host "##vso[task.setvariable variable=ADDITIONAL_PATH;]$(Build.SourcesDirectory)\tmp_bin"
|
||||
Write-Host "##vso[task.setvariable variable=SCCACHE_IDLE_TIMEOUT;]1500"
|
||||
Write-Host "##vso[task.setvariable variable=CMAKE_CUDA_COMPILER_LAUNCHER;]$(Build.SourcesDirectory)/tmp_bin/randomtemp.exe;$(Build.SourcesDirectory)/tmp_bin/sccache.exe"
|
||||
displayName: Set MKL, sccache and randomtemp environment variables
|
||||
|
||||
# View current environment variables
|
||||
- script:
|
||||
set
|
||||
displayName: Show environment variables
|
||||
14
.azure_pipelines/job_templates/wheel-wait-job-template.yml
Normal file
14
.azure_pipelines/job_templates/wheel-wait-job-template.yml
Normal file
@ -0,0 +1,14 @@
|
||||
# Main logic to initiate wait for PR artifact to be ready
|
||||
|
||||
steps:
|
||||
- task: InvokeRESTAPI@1
|
||||
displayName: 'Wait for job success and wheel ready'
|
||||
timeoutInMinutes: 60
|
||||
inputs:
|
||||
connectionType: 'connectedServiceName'
|
||||
serviceConnection: circleciconn
|
||||
method: 'POST'
|
||||
headers: '{"Content-Type":"application/json", "BranchName":"$(_TARGET_BRANCH_TO_CHECK)", "JobName":"$(TARGET_CIRCLECI_BUILD_PR)", "PRNumber":"$(_TARGET_PR_NUMBER)", "TargetCommit":"$(_TARGET_COMMIT)", "PlanUrl":"$(System.CollectionUri)", "ProjectId":"$(System.TeamProjectId)", "HubName":"$(System.HostType)", "PlanId":"$(System.PlanId)", "JobId":"$(System.JobId)", "TimelineId":"$(System.TimelineId)", "TaskInstanceId":"$(System.TaskInstanceId)", "AuthToken":"$(System.AccessToken)"}'
|
||||
body: ''
|
||||
urlSuffix: 'api/JobStatus'
|
||||
waitForCompletion: true
|
||||
92
.azure_pipelines/job_templates/wheel-wait-template.yml
Normal file
92
.azure_pipelines/job_templates/wheel-wait-template.yml
Normal file
@ -0,0 +1,92 @@
|
||||
# Initiate 5 agentless-server waiting jobs to check on the
|
||||
# status of PR artifact builds, for a maximum wait time of
|
||||
# 11*60 min=660 mins. These jobs will pass immediately
|
||||
# once targeted CircleCI build is ready.
|
||||
|
||||
jobs:
|
||||
- job: checkjob1
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob2
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob1
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob3
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob2
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob4
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob3
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob5
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob4
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob6
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob5
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob7
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob6
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob8
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob7
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob9
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob8
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob10
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob9
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
|
||||
- job: checkjob11
|
||||
pool: server
|
||||
timeoutInMinutes: 60
|
||||
dependsOn: checkjob10
|
||||
continueOnError: true
|
||||
steps:
|
||||
- template: wheel-wait-job-template.yml
|
||||
60
.azure_pipelines/nightly-pytorch-tests-pipeline.yml
Normal file
60
.azure_pipelines/nightly-pytorch-tests-pipeline.yml
Normal file
@ -0,0 +1,60 @@
|
||||
# PyTorch Nightly PyTorch Tests Builds Pipeline on Azure DevOps
|
||||
#
|
||||
# This pipeline runs custom PyTorch unit-tests on nightly
|
||||
# PyTorch wheels.
|
||||
|
||||
stages:
|
||||
- stage: 'NightlyCustomTests'
|
||||
displayName: 'Run custom unit tests on PyTorch wheels'
|
||||
jobs:
|
||||
- template: job_templates/pytorch-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_CPU_docker
|
||||
pool: $(BUILD_POOL_LIN_1)
|
||||
customMatrixes:
|
||||
Nightly_Custom_Tests:
|
||||
_DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_1)
|
||||
_PYTHON_VERSION: $(PYTHON_VERSION_LIN_1)
|
||||
_CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_1)
|
||||
_RUN_TESTS: $(RUN_TESTS_LIN)
|
||||
|
||||
- template: job_templates/pytorch-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_GPU_docker
|
||||
pool: $(BUILD_POOL_LIN_2)
|
||||
customMatrixes:
|
||||
Nightly_Custom_Tests:
|
||||
_DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_2)
|
||||
_PYTHON_VERSION: $(PYTHON_VERSION_LIN_2)
|
||||
_CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_2)
|
||||
_RUN_TESTS: $(RUN_TESTS_LIN)
|
||||
|
||||
- template: job_templates/pytorch-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_CPU
|
||||
pool: $(BUILD_POOL_WIN_1)
|
||||
customMatrixes:
|
||||
Nightly_Custom_Tests:
|
||||
_PYTHON_VERSION: $(PYTHON_VERSION_WIN_1)
|
||||
_CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_1)
|
||||
_RUN_TESTS: $(RUN_TESTS_WIN)
|
||||
|
||||
- template: job_templates/pytorch-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_GPU
|
||||
pool: $(BUILD_POOL_WIN_2)
|
||||
customMatrixes:
|
||||
Nightly_Custom_Tests:
|
||||
_PYTHON_VERSION: $(PYTHON_VERSION_WIN_2)
|
||||
_CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_2)
|
||||
_RUN_TESTS: $(RUN_TESTS_WIN)
|
||||
|
||||
- stage: 'NotifyWebapp'
|
||||
displayName: 'Notify Webapp that pipeline is finished'
|
||||
dependsOn: NightlyCustomTests
|
||||
condition: succeededOrFailed()
|
||||
jobs:
|
||||
- template: job_templates/notify-webapp-template.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_CPU
|
||||
pool: $(BUILD_POOL_LIN_1)
|
||||
62
.azure_pipelines/pytorch-tests-pipeline.yml
Normal file
62
.azure_pipelines/pytorch-tests-pipeline.yml
Normal file
@ -0,0 +1,62 @@
|
||||
# PyTorch PR PyTorch Tests Builds Pipeline on Azure DevOps
|
||||
#
|
||||
# This pipeline:
|
||||
# 1) ensures that CircleCI builds for a given PR
|
||||
# have finished, and that its artifacts are
|
||||
# ready for download
|
||||
# 2) runs custom PyTorch unit-tests on PyTorch
|
||||
# wheels generated during PR builds.
|
||||
|
||||
resources:
|
||||
webhooks:
|
||||
- webhook: GitHubPyTorchPRTrigger
|
||||
connection: GitHubPyTorchPRTriggerConnection
|
||||
filters:
|
||||
- path: repositoryName
|
||||
value: pytorch_tests
|
||||
|
||||
stages:
|
||||
- stage: 'EnsureArtifactsReady'
|
||||
displayName: 'Ensure PyTorch PR Artifacts are ready'
|
||||
jobs:
|
||||
- template: job_templates/wheel-wait-template.yml
|
||||
variables:
|
||||
_TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
|
||||
_TARGET_PR_NUMBER: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
|
||||
_TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}
|
||||
|
||||
- stage: 'PRCustomTests'
|
||||
displayName: 'Run custom unit tests on PyTorch wheels'
|
||||
dependsOn: EnsureArtifactsReady
|
||||
condition: succeeded()
|
||||
jobs:
|
||||
- template: job_templates/pytorch-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_GPU_docker
|
||||
pool: $(BUILD_POOL_PR)
|
||||
customMatrixes:
|
||||
PR_Custom_Tests:
|
||||
_PYTHON_VERSION: $(PYTHON_VERSION_PR)
|
||||
_CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_PR)
|
||||
_TARGET_CIRCLECI_BUILD: $(TARGET_CIRCLECI_BUILD_PR)
|
||||
_TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
|
||||
_TARGET_PR_NUMBER: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
|
||||
_TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}
|
||||
_DOCKER_IMAGE: $(DOCKER_IMAGE_PR)
|
||||
_RUN_TESTS: $(RUN_TESTS_PR)
|
||||
|
||||
- stage: 'NotifyWebapp'
|
||||
displayName: 'Notify Webapp that pipeline is finished'
|
||||
dependsOn: PRCustomTests
|
||||
condition: succeededOrFailed()
|
||||
jobs:
|
||||
- template: job_templates/notify-webapp-template.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_CPU
|
||||
pool: $(BUILD_POOL_LIN_1)
|
||||
customMatrixes:
|
||||
PR_Notify_WebApp:
|
||||
_TARGET_CIRCLECI_BUILD: $(TARGET_CIRCLECI_BUILD_PR)
|
||||
_TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
|
||||
_TARGET_PR_NUMBER: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
|
||||
_TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}
|
||||
224
.azure_pipelines/verify-pipeline.yml
Normal file
224
.azure_pipelines/verify-pipeline.yml
Normal file
@ -0,0 +1,224 @@
|
||||
# PyTorch Official Builds Pipeline on Azure DevOps
|
||||
#
|
||||
# This pipeline:
|
||||
# 1) builds PyTorch on all available configurations
|
||||
# 2) verifies PyTorch artifacts by installing them in a clean environment
|
||||
# and checking torch.__version_
|
||||
# 3) publishes official PyTorch artifacts to Azure DevOps Artifacts for consumption
|
||||
|
||||
stages:
|
||||
- stage: 'Build'
|
||||
displayName: 'Build PyTorch'
|
||||
jobs:
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_CPU_docker
|
||||
pool: 'PyTorch-Linux-CPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
build_stage: True
|
||||
is_official_build: True
|
||||
os: ubuntu
|
||||
cuda: cpu
|
||||
customMatrixes:
|
||||
Py_38:
|
||||
configuration: ubuntu_1804_py_38_cpu
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
|
||||
Py_37:
|
||||
configuration: ubuntu_1804_py_37_cpu
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev
|
||||
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_GPU_docker
|
||||
pool: 'PyTorch-Linux-GPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
build_stage: True
|
||||
is_official_build: True
|
||||
os: ubuntu
|
||||
cuda: gpu
|
||||
customMatrixes:
|
||||
Py_39_CUDA_112_cuDNN_810:
|
||||
configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
|
||||
CUDA_VERSION: 112
|
||||
Py_38_CUDA_102_cuDNN_810:
|
||||
configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
|
||||
CUDA_VERSION: 102
|
||||
Py_37_CUDA_101_cuDNN_765:
|
||||
configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
|
||||
CUDA_VERSION: 101
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_CPU
|
||||
pool: 'PyTorch-Win-CPU'
|
||||
build_stage: True
|
||||
is_official_build: True
|
||||
os: windows
|
||||
cuda: cpu
|
||||
customMatrixes:
|
||||
Py_38:
|
||||
configuration: windows_2019_py_38_cpu
|
||||
Py_37:
|
||||
configuration: windows_2019_py_37_cpu
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_GPU
|
||||
pool: 'PyTorch-Win-GPU'
|
||||
build_stage: True
|
||||
is_official_build: True
|
||||
os: windows
|
||||
cuda: gpu
|
||||
customMatrixes:
|
||||
Py_39_CUDA_112_cuDNN_810:
|
||||
configuration: windows_2019_py_39_cuda_112_cudnn_810
|
||||
CUDA_VERSION: 112
|
||||
Py_38_CUDA_102_cuDNN_765:
|
||||
configuration: windows_2019_py_38_cuda_102_cudnn_765
|
||||
CUDA_VERSION: 102
|
||||
Py_37_CUDA_101_cuDNN_764:
|
||||
configuration: windows_2019_py_37_cuda_101_cudnn_764
|
||||
CUDA_VERSION: 101
|
||||
|
||||
- stage: 'Verify'
|
||||
displayName: 'Verify PyTorch wheels'
|
||||
dependsOn: Build
|
||||
condition: succeeded()
|
||||
jobs:
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_CPU_docker
|
||||
pool: 'PyTorch-Linux-CPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
verify_stage: True
|
||||
is_official_build: True
|
||||
customMatrixes:
|
||||
Py_38:
|
||||
configuration: ubuntu_1804_py_38_cpu
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
|
||||
Py_37:
|
||||
configuration: ubuntu_1804_py_37_cpu
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev
|
||||
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_GPU_docker
|
||||
pool: 'PyTorch-Linux-GPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
verify_stage: True
|
||||
is_official_build: True
|
||||
customMatrixes:
|
||||
Py_39_CUDA_112_cuDNN_810:
|
||||
configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
|
||||
CUDA_VERSION: 112
|
||||
Py_38_CUDA_102_cuDNN_810:
|
||||
configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
|
||||
CUDA_VERSION: 102
|
||||
Py_37_CUDA_101_cuDNN_765:
|
||||
configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
|
||||
CUDA_VERSION: 101
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_CPU
|
||||
pool: 'PyTorch-Win-CPU'
|
||||
verify_stage: True
|
||||
is_official_build: True
|
||||
customMatrixes:
|
||||
Py_38:
|
||||
configuration: windows_2019_py_38_cpu
|
||||
Py_37:
|
||||
configuration: windows_2019_py_37_cpu
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_GPU
|
||||
pool: 'PyTorch-Win-GPU'
|
||||
verify_stage: True
|
||||
is_official_build: True
|
||||
customMatrixes:
|
||||
Py_39_CUDA_112_cuDNN_810:
|
||||
configuration: windows_2019_py_39_cuda_112_cudnn_810
|
||||
CUDA_VERSION: 112
|
||||
Py_38_CUDA_102_cuDNN_765:
|
||||
configuration: windows_2019_py_38_cuda_102_cudnn_765
|
||||
CUDA_VERSION: 102
|
||||
Py_37_CUDA_101_cuDNN_764:
|
||||
configuration: windows_2019_py_37_cuda_101_cudnn_764
|
||||
CUDA_VERSION: 101
|
||||
|
||||
- stage: 'Publish'
|
||||
displayName: 'Publish PyTorch wheels'
|
||||
dependsOn: Verify
|
||||
condition: succeeded()
|
||||
jobs:
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_CPU_docker
|
||||
pool: 'PyTorch-Linux-CPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
publish_stage: True
|
||||
is_official_build: True
|
||||
customMatrixes:
|
||||
Py_38:
|
||||
configuration: ubuntu_1804_py_38_cpu
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
|
||||
Py_37:
|
||||
configuration: ubuntu_1804_py_37_cpu
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev
|
||||
|
||||
- template: job_templates/build-verify-publish-template-unix.yml
|
||||
parameters:
|
||||
name: ubuntu_1804_GPU_docker
|
||||
pool: 'PyTorch-Linux-GPU'
|
||||
container_endpoint: pytorchms.azurecr.io
|
||||
publish_stage: True
|
||||
is_official_build: True
|
||||
customMatrixes:
|
||||
Py_39_CUDA_112_cuDNN_810:
|
||||
configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
|
||||
CUDA_VERSION: 112
|
||||
Py_38_CUDA_102_cuDNN_810:
|
||||
configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
|
||||
CUDA_VERSION: 102
|
||||
Py_37_CUDA_101_cuDNN_765:
|
||||
configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
|
||||
container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
|
||||
CUDA_VERSION: 101
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_CPU
|
||||
pool: 'PyTorch-Win-CPU'
|
||||
publish_stage: True
|
||||
is_official_build: True
|
||||
customMatrixes:
|
||||
Py_38:
|
||||
configuration: windows_2019_py_38_cpu
|
||||
Py_37:
|
||||
configuration: windows_2019_py_37_cpu
|
||||
|
||||
- template: job_templates/build-verify-publish-template-win.yml
|
||||
parameters:
|
||||
name: windows_2019_GPU
|
||||
pool: 'PyTorch-Win-GPU'
|
||||
publish_stage: True
|
||||
is_official_build: True
|
||||
customMatrixes:
|
||||
Py_39_CUDA_112_cuDNN_810:
|
||||
configuration: windows_2019_py_39_cuda_112_cudnn_810
|
||||
CUDA_VERSION: 112
|
||||
Py_38_CUDA_102_cuDNN_765:
|
||||
configuration: windows_2019_py_38_cuda_102_cudnn_765
|
||||
CUDA_VERSION: 102
|
||||
Py_37_CUDA_101_cuDNN_764:
|
||||
configuration: windows_2019_py_37_cuda_101_cudnn_764
|
||||
CUDA_VERSION: 101
|
||||
109
.bazelrc
109
.bazelrc
@ -1,11 +1,10 @@
|
||||
build --cxxopt=--std=c++14
|
||||
build --copt=--std=c++14
|
||||
build --copt=-I.
|
||||
# Bazel does not support including its cc_library targets as system
|
||||
# headers. We work around this for generated code
|
||||
# (e.g. c10/macros/cmake_macros.h) by making the generated directory a
|
||||
# system include path.
|
||||
build --copt=-isystem --copt bazel-out/k8-fastbuild/bin
|
||||
build --copt=-isystem --copt bazel-out/darwin-fastbuild/bin
|
||||
build --experimental_ui_max_stdouterr_bytes=2048576
|
||||
|
||||
# Configuration to disable tty features for environments like CI
|
||||
@ -13,103 +12,15 @@ build:no-tty --curses no
|
||||
build:no-tty --progress_report_interval 10
|
||||
build:no-tty --show_progress_rate_limit 10
|
||||
|
||||
# Build with GPU support by default.
|
||||
build --define=cuda=true
|
||||
# rules_cuda configuration
|
||||
build --@rules_cuda//cuda:enable_cuda
|
||||
build --@rules_cuda//cuda:cuda_targets=sm_52
|
||||
build --@rules_cuda//cuda:compiler=nvcc
|
||||
build --repo_env=CUDA_PATH=/usr/local/cuda
|
||||
|
||||
# Configuration to build without GPU support
|
||||
build:cpu-only --define=cuda=false
|
||||
# Configuration to build with GPU support
|
||||
build:gpu --define=cuda=true
|
||||
# define a separate build folder for faster switching between configs
|
||||
build:cpu-only --platform_suffix=-cpu-only
|
||||
build:gpu --platform_suffix=-gpu
|
||||
# See the note on the config-less build for details about why we are
|
||||
# doing this. We must also do it for the "-cpu-only" platform suffix.
|
||||
build --copt=-isystem --copt=bazel-out/k8-fastbuild-cpu-only/bin
|
||||
# doing this. We must also do it for the "-gpu" platform suffix.
|
||||
build --copt=-isystem --copt=bazel-out/k8-fastbuild-gpu/bin
|
||||
# rules_cuda configuration
|
||||
build:cpu-only --@rules_cuda//cuda:enable_cuda=False
|
||||
|
||||
# Definition of --config=shell
|
||||
# interactive shell immediately before execution
|
||||
build:shell --run_under="//tools/bazel_tools:shellwrap"
|
||||
|
||||
# Disable all warnings for external repositories. We don't care about
|
||||
# their warnings.
|
||||
build --per_file_copt=^external/@-w
|
||||
|
||||
# Set additional warnings to error level.
|
||||
#
|
||||
# Implementation notes:
|
||||
# * we use file extensions to determine if we are using the C++
|
||||
# compiler or the cuda compiler
|
||||
# * we use ^// at the start of the regex to only permit matching
|
||||
# PyTorch files. This excludes external repos.
|
||||
#
|
||||
# Note that because this is logically a command-line flag, it is
|
||||
# considered the word on what warnings are enabled. This has the
|
||||
# unfortunate consequence of preventing us from disabling an error at
|
||||
# the target level because those flags will come before these flags in
|
||||
# the action invocation. Instead we provide per-file exceptions after
|
||||
# this.
|
||||
#
|
||||
# On the bright side, this means we don't have to more broadly apply
|
||||
# the exceptions to an entire target.
|
||||
#
|
||||
# Looking for CUDA flags? We have a cu_library macro that we can edit
|
||||
# directly. Look in //tools/rules:cu.bzl for details. Editing the
|
||||
# macro over this has the following advantages:
|
||||
# * making changes does not require discarding the Bazel analysis
|
||||
# cache
|
||||
# * it allows for selective overrides on individual targets since the
|
||||
# macro-level opts will come earlier than target level overrides
|
||||
|
||||
build --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=all
|
||||
# The following warnings come from -Wall. We downgrade them from error
|
||||
# to warnings here.
|
||||
#
|
||||
# sign-compare has a tremendous amount of violations in the
|
||||
# codebase. It will be a lot of work to fix them, just disable it for
|
||||
# now.
|
||||
build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-sign-compare
|
||||
# We intentionally use #pragma unroll, which is compiler specific.
|
||||
build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-error=unknown-pragmas
|
||||
|
||||
build --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=extra
|
||||
# The following warnings come from -Wextra. We downgrade them from error
|
||||
# to warnings here.
|
||||
#
|
||||
# unused-parameter-compare has a tremendous amount of violations in the
|
||||
# codebase. It will be a lot of work to fix them, just disable it for
|
||||
# now.
|
||||
build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-parameter
|
||||
# missing-field-parameters has both a large number of violations in
|
||||
# the codebase, but it also is used pervasively in the Python C
|
||||
# API. There are a couple of catches though:
|
||||
# * we use multiple versions of the Python API and hence have
|
||||
# potentially multiple different versions of each relevant
|
||||
# struct. They may have different numbers of fields. It will be
|
||||
# unwieldy to support multiple versions in the same source file.
|
||||
# * Python itself for many of these structs recommends only
|
||||
# initializing a subset of the fields. We should respect the API
|
||||
# usage conventions of our dependencies.
|
||||
#
|
||||
# Hence, we just disable this warning altogether. We may want to clean
|
||||
# up some of the clear-cut cases that could be risky, but we still
|
||||
# likely want to have this disabled for the most part.
|
||||
build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-missing-field-initializers
|
||||
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterCompositeExplicitAutograd\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterCompositeImplicitAutograd\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterMkldnnCPU\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterNestedTensorCPU\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterQuantizedCPU\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterSparseCPU\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterSparseCsrCPU\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterNestedTensorMeta\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterSparseMeta\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterQuantizedMeta\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:aten/src/ATen/RegisterZeroTensor\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:torch/csrc/lazy/generated/RegisterAutogradLazy\.cpp$'@-Wno-error=unused-function
|
||||
build --per_file_copt='//:torch/csrc/lazy/generated/RegisterLazy\.cpp$'@-Wno-error=unused-function
|
||||
build:gpu --@rules_cuda//cuda:enable_cuda
|
||||
build:gpu --@rules_cuda//cuda:cuda_targets=sm_52
|
||||
build:gpu --@rules_cuda//cuda:compiler=nvcc
|
||||
build:gpu --repo_env=CUDA_PATH=/usr/local/cuda
|
||||
|
||||
@ -1,25 +0,0 @@
|
||||
[pt]
|
||||
is_oss=1
|
||||
|
||||
[buildfile]
|
||||
name = BUCK.oss
|
||||
includes = //tools/build_defs/select.bzl
|
||||
|
||||
[repositories]
|
||||
bazel_skylib = third_party/bazel-skylib/
|
||||
ovr_config = .
|
||||
|
||||
[download]
|
||||
in_build = true
|
||||
|
||||
[cxx]
|
||||
cxxflags = -std=c++17
|
||||
should_remap_host_platform = true
|
||||
cpp = /usr/bin/clang
|
||||
cc = /usr/bin/clang
|
||||
cxx = /usr/bin/clang++
|
||||
cxxpp = /usr/bin/clang++
|
||||
ld = /usr/bin/clang++
|
||||
|
||||
[project]
|
||||
default_flavors_mode=all
|
||||
498
.circleci/README.md
Normal file
498
.circleci/README.md
Normal file
@ -0,0 +1,498 @@
|
||||
Structure of CI
|
||||
===============
|
||||
|
||||
setup job:
|
||||
1. Does a git checkout
|
||||
2. Persists CircleCI scripts (everything in `.circleci`) into a workspace. Why?
|
||||
We don't always do a Git checkout on all subjobs, but we usually
|
||||
still want to be able to call scripts one way or another in a subjob.
|
||||
Persisting files this way lets us have access to them without doing a
|
||||
checkout. This workspace is conventionally mounted on `~/workspace`
|
||||
(this is distinguished from `~/project`, which is the conventional
|
||||
working directory that CircleCI will default to starting your jobs
|
||||
in.)
|
||||
3. Write out the commit message to `.circleci/COMMIT_MSG`. This is so
|
||||
we can determine in subjobs if we should actually run the jobs or
|
||||
not, even if there isn't a Git checkout.
|
||||
|
||||
|
||||
|
||||
|
||||
CircleCI configuration generator
|
||||
================================
|
||||
|
||||
One may no longer make changes to the `.circleci/config.yml` file directly.
|
||||
Instead, one must edit these Python scripts or files in the `verbatim-sources/` directory.
|
||||
|
||||
|
||||
Usage
|
||||
----------
|
||||
|
||||
1. Make changes to these scripts.
|
||||
2. Run the `regenerate.sh` script in this directory and commit the script changes and the resulting change to `config.yml`.
|
||||
|
||||
You'll see a build failure on GitHub if the scripts don't agree with the checked-in version.
|
||||
|
||||
|
||||
Motivation
|
||||
----------
|
||||
|
||||
These scripts establish a single, authoritative source of documentation for the CircleCI configuration matrix.
|
||||
The documentation, in the form of diagrams, is automatically generated and cannot drift out of sync with the YAML content.
|
||||
|
||||
Furthermore, consistency is enforced within the YAML config itself, by using a single source of data to generate
|
||||
multiple parts of the file.
|
||||
|
||||
* Facilitates one-off culling/enabling of CI configs for testing PRs on special targets
|
||||
|
||||
Also see https://github.com/pytorch/pytorch/issues/17038
|
||||
|
||||
|
||||
Future direction
|
||||
----------------
|
||||
|
||||
### Declaring sparse config subsets
|
||||
See comment [here](https://github.com/pytorch/pytorch/pull/17323#pullrequestreview-206945747):
|
||||
|
||||
In contrast with a full recursive tree traversal of configuration dimensions,
|
||||
> in the future I think we actually want to decrease our matrix somewhat and have only a few mostly-orthogonal builds that taste as many different features as possible on PRs, plus a more complete suite on every PR and maybe an almost full suite nightly/weekly (we don't have this yet). Specifying PR jobs in the future might be easier to read with an explicit list when we come to this.
|
||||
|
||||
----------------
|
||||
----------------
|
||||
|
||||
# How do the binaries / nightlies / releases work?
|
||||
|
||||
### What is a binary?
|
||||
|
||||
A binary or package (used interchangeably) is a pre-built collection of c++ libraries, header files, python bits, and other files. We build these and distribute them so that users do not need to install from source.
|
||||
|
||||
A **binary configuration** is a collection of
|
||||
|
||||
* release or nightly
|
||||
* releases are stable, nightlies are beta and built every night
|
||||
* python version
|
||||
* linux: 3.5m, 3.6m, 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
|
||||
* macos: 3.6, 3.7, 3.8
|
||||
* windows: 3.6, 3.7, 3.8
|
||||
* cpu version
|
||||
* cpu, cuda 9.0, cuda 10.0
|
||||
* The supported cuda versions occasionally change
|
||||
* operating system
|
||||
* Linux - these are all built on CentOS. There haven't been any problems in the past building on CentOS and using on Ubuntu
|
||||
* MacOS
|
||||
* Windows - these are built on Azure pipelines
|
||||
* devtoolset version (gcc compiler version)
|
||||
* This only matters on Linux cause only Linux uses gcc. tldr is gcc made a backwards incompatible change from gcc 4.8 to gcc 5, because it had to change how it implemented std::vector and std::string
|
||||
|
||||
### Where are the binaries?
|
||||
|
||||
The binaries are built in CircleCI. There are nightly binaries built every night at 9pm PST (midnight EST) and release binaries corresponding to Pytorch releases, usually every few months.
|
||||
|
||||
We have 3 types of binary packages
|
||||
|
||||
* pip packages - nightlies are stored on s3 (pip install -f \<a s3 url\>). releases are stored in a pip repo (pip install torch) (ask Soumith about this)
|
||||
* conda packages - nightlies and releases are both stored in a conda repo. Nightly packages have a '_nightly' suffix
|
||||
* libtorch packages - these are zips of all the c++ libraries, header files, and sometimes dependencies. These are c++ only
|
||||
* shared with dependencies (the only supported option for Windows)
|
||||
* static with dependencies
|
||||
* shared without dependencies
|
||||
* static without dependencies
|
||||
|
||||
All binaries are built in CircleCI workflows except Windows. There are checked-in workflows (committed into the .circleci/config.yml) to build the nightlies every night. Releases are built by manually pushing a PR that builds the suite of release binaries (overwrite the config.yml to build the release)
|
||||
|
||||
# CircleCI structure of the binaries
|
||||
|
||||
Some quick vocab:
|
||||
|
||||
* A **workflow** is a CircleCI concept; it is a DAG of '**jobs**'. ctrl-f 'workflows' on https://github.com/pytorch/pytorch/blob/master/.circleci/config.yml to see the workflows.
|
||||
* **jobs** are a sequence of '**steps**'
|
||||
* **steps** are usually just a bash script or a builtin CircleCI command. *All steps run in new environments, environment variables declared in one script DO NOT persist to following steps*
|
||||
* CircleCI has a **workspace**, which is essentially a cache between steps of the *same job* in which you can store artifacts between steps.
|
||||
|
||||
## How are the workflows structured?
|
||||
|
||||
The nightly binaries have 3 workflows. We have one job (actually 3 jobs: build, test, and upload) per binary configuration
|
||||
|
||||
1. binary_builds
|
||||
1. every day midnight EST
|
||||
2. linux: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/linux-binary-build-defaults.yml
|
||||
3. macos: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/macos-binary-build-defaults.yml
|
||||
4. For each binary configuration, e.g. linux_conda_3.7_cpu there is a
|
||||
1. binary_linux_conda_3.7_cpu_build
|
||||
1. Builds the build. On linux jobs this uses the 'docker executor'.
|
||||
2. Persists the package to the workspace
|
||||
2. binary_linux_conda_3.7_cpu_test
|
||||
1. Loads the package to the workspace
|
||||
2. Spins up a docker image (on Linux), mapping the package and code repos into the docker
|
||||
3. Runs some smoke tests in the docker
|
||||
4. (Actually, for macos this is a step rather than a separate job)
|
||||
3. binary_linux_conda_3.7_cpu_upload
|
||||
1. Logs in to aws/conda
|
||||
2. Uploads the package
|
||||
2. update_s3_htmls
|
||||
1. every day 5am EST
|
||||
2. https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/binary_update_htmls.yml
|
||||
3. See below for what these are for and why they're needed
|
||||
4. Three jobs that each examine the current contents of aws and the conda repo and update some html files in s3
|
||||
3. binarysmoketests
|
||||
1. every day
|
||||
2. https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
|
||||
3. For each binary configuration, e.g. linux_conda_3.7_cpu there is a
|
||||
1. smoke_linux_conda_3.7_cpu
|
||||
1. Downloads the package from the cloud, e.g. using the official pip or conda instructions
|
||||
2. Runs the smoke tests
|
||||
|
||||
## How are the jobs structured?
|
||||
|
||||
The jobs are in https://github.com/pytorch/pytorch/tree/master/.circleci/verbatim-sources. Jobs are made of multiple steps. There are some shared steps used by all the binaries/smokes. Steps of these jobs are all delegated to scripts in https://github.com/pytorch/pytorch/tree/master/.circleci/scripts .
|
||||
|
||||
* Linux jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/linux-binary-build-defaults.yml
|
||||
* binary_linux_build.sh
|
||||
* binary_linux_test.sh
|
||||
* binary_linux_upload.sh
|
||||
* MacOS jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/macos-binary-build-defaults.yml
|
||||
* binary_macos_build.sh
|
||||
* binary_macos_test.sh
|
||||
* binary_macos_upload.sh
|
||||
* Update html jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/binary_update_htmls.yml
|
||||
* These delegate from the pytorch/builder repo
|
||||
* https://github.com/pytorch/builder/blob/master/cron/update_s3_htmls.sh
|
||||
* https://github.com/pytorch/builder/blob/master/cron/upload_binary_sizes.sh
|
||||
* Smoke jobs (both linux and macos): https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
|
||||
* These delegate from the pytorch/builder repo
|
||||
* https://github.com/pytorch/builder/blob/master/run_tests.sh
|
||||
* https://github.com/pytorch/builder/blob/master/smoke_test.sh
|
||||
* https://github.com/pytorch/builder/blob/master/check_binary.sh
|
||||
* Common shared code (shared across linux and macos): https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/nightly-binary-build-defaults.yml
|
||||
* binary_checkout.sh - checks out pytorch/builder repo. Right now this also checks out pytorch/pytorch, but it shouldn't. pytorch/pytorch should just be shared through the workspace. This can handle being run before binary_populate_env.sh
|
||||
* binary_populate_env.sh - parses BUILD_ENVIRONMENT into the separate env variables that make up a binary configuration. Also sets lots of default values, the date, the version strings, the location of folders in s3, all sorts of things. This generally has to be run before other steps.
|
||||
* binary_install_miniconda.sh - Installs miniconda, cross platform. Also hacks this for the update_binary_sizes job that doesn't have the right env variables
|
||||
* binary_run_in_docker.sh - Takes a bash script file (the actual test code) from a hardcoded location, spins up a docker image, and runs the script inside the docker image
|
||||
|
||||
### **Why do the steps all refer to scripts?**
|
||||
|
||||
CircleCI creates a final yaml file by inlining every <<* segment, so if we were to keep all the code in the config.yml itself then the config size would go over 4 MB and cause infra problems.
|
||||
|
||||
### **What is binary_run_in_docker for?**
|
||||
|
||||
So, CircleCI has several executor types: macos, machine, and docker are the ones we use. The 'machine' executor gives you two cores on some linux vm. The 'docker' executor gives you considerably more cores (nproc was 32 instead of 2 back when I tried in February). Since the dockers are faster, we try to run everything that we can in dockers. Thus
|
||||
|
||||
* linux build jobs use the docker executor. Running them on the docker executor was at least 2x faster than running them on the machine executor
|
||||
* linux test jobs use the machine executor in order for them to properly interface with GPUs since docker executors cannot execute with attached GPUs
|
||||
* linux upload jobs use the machine executor. The upload jobs are so short that it doesn't really matter what they use
|
||||
* linux smoke test jobs use the machine executor for the same reason as the linux test jobs
|
||||
|
||||
binary_run_in_docker.sh is a way to share the docker start-up code between the binary test jobs and the binary smoke test jobs
|
||||
|
||||
### **Why does binary_checkout also checkout pytorch? Why shouldn't it?**
|
||||
|
||||
We want all the nightly binary jobs to run on the exact same git commit, so we wrote our own checkout logic to ensure that the same commit was always picked. Later circleci changed that to use a single pytorch checkout and persist it through the workspace (they did this because our config file was too big, so they wanted to take a lot of the setup code into scripts, but the scripts needed the code repo to exist to be called, so they added a prereq step called 'setup' to checkout the code and persist the needed scripts to the workspace). The changes to the binary jobs were not properly tested, so they all broke from missing pytorch code no longer existing. We hotfixed the problem by adding the pytorch checkout back to binary_checkout, so now there's two checkouts of pytorch on the binary jobs. This problem still needs to be fixed, but it takes careful tracing of which code is being called where.
|
||||
|
||||
# Azure Pipelines structure of the binaries
|
||||
|
||||
TODO: fill in stuff
|
||||
|
||||
## How are the workflows structured?
|
||||
|
||||
TODO: fill in stuff
|
||||
|
||||
## How are the jobs structured?
|
||||
|
||||
TODO: fill in stuff
|
||||
|
||||
# Code structure of the binaries (circleci agnostic)
|
||||
|
||||
## Overview
|
||||
|
||||
The code that runs the binaries lives in two places, in the normal [github.com/pytorch/pytorch](http://github.com/pytorch/pytorch), but also in [github.com/pytorch/builder](http://github.com/pytorch/builder), which is a repo that defines how all the binaries are built. The relevant code is
|
||||
|
||||
|
||||
```
|
||||
# All code needed to set-up environments for build code to run in,
|
||||
# but only code that is specific to the current CI system
|
||||
pytorch/pytorch
|
||||
- .circleci/ # Folder that holds all circleci related stuff
|
||||
- config.yml # GENERATED file that actually controls all circleci behavior
|
||||
- verbatim-sources # Used to generate job/workflow sections in ^
|
||||
- scripts/ # Code needed to prepare circleci environments for binary build scripts
|
||||
|
||||
- setup.py # Builds pytorch. This is wrapped in pytorch/builder
|
||||
- cmake files # used in normal building of pytorch
|
||||
|
||||
# All code needed to prepare a binary build, given an environment
|
||||
# with all the right variables/packages/paths.
|
||||
pytorch/builder
|
||||
|
||||
# Given an installed binary and a proper python env, runs some checks
|
||||
# to make sure the binary was built the proper way. Checks things like
|
||||
# the library dependencies, symbols present, etc.
|
||||
- check_binary.sh
|
||||
|
||||
# Given an installed binary, runs python tests to make sure everything
|
||||
# is in order. These should be de-duped. Right now they both run smoke
|
||||
# tests, but are called from different places. Usually just call some
|
||||
# import statements, but also has overlap with check_binary.sh above
|
||||
- run_tests.sh
|
||||
- smoke_test.sh
|
||||
|
||||
# Folders that govern how packages are built. See paragraphs below
|
||||
|
||||
- conda/
|
||||
- build_pytorch.sh # Entrypoint. Delegates to proper conda build folder
|
||||
- switch_cuda_version.sh # Switches the active CUDA installation in Docker
|
||||
- pytorch-nightly/ # Build-folder
|
||||
- manywheel/
|
||||
- build_cpu.sh # Entrypoint for cpu builds
|
||||
- build.sh # Entrypoint for CUDA builds
|
||||
- build_common.sh # Actual build script that ^^ call into
|
||||
- wheel/
|
||||
- build_wheel.sh # Entrypoint for wheel builds
|
||||
- windows/
|
||||
- build_pytorch.bat # Entrypoint for wheel builds on Windows
|
||||
```
|
||||
|
||||
Every type of package has an entrypoint build script that handles all the important logic.
|
||||
|
||||
## Conda
|
||||
|
||||
Linux, MacOS and Windows use the same code flow for the conda builds.
|
||||
|
||||
Conda packages are built with conda-build, see https://conda.io/projects/conda-build/en/latest/resources/commands/conda-build.html
|
||||
|
||||
Basically, you pass `conda build` a build folder (pytorch-nightly/ above) that contains a build script and a meta.yaml. The meta.yaml specifies in what python environment to build the package in, and what dependencies the resulting package should have, and the build script gets called in the env to build the thing.
|
||||
tl;dr on conda-build is
|
||||
|
||||
1. Creates a brand new conda environment, based off of deps in the meta.yaml
|
||||
1. Note that environment variables do not get passed into this build env unless they are specified in the meta.yaml
|
||||
2. If the build fails this environment will stick around. You can activate it for much easier debugging. The “General Python” section below explains what exactly a python “environment” is.
|
||||
2. Calls build.sh in the environment
|
||||
3. Copies the finished package to a new conda env, also specified by the meta.yaml
|
||||
4. Runs some simple import tests (if specified in the meta.yaml)
|
||||
5. Saves the finished package as a tarball
|
||||
|
||||
The build.sh we use is essentially a wrapper around `python setup.py build`, but it also manually copies in some of our dependent libraries into the resulting tarball and messes with some rpaths.
|
||||
|
||||
The entrypoint file `builder/conda/build_conda.sh` is complicated because
|
||||
|
||||
* It works for Linux, MacOS and Windows
|
||||
* The mac builds used to create their own environments, since they all used to be on the same machine. There’s now a lot of extra logic to handle conda envs. This extra machinery could be removed
|
||||
* It used to handle testing too, which adds more logic messing with python environments too. This extra machinery could be removed.
|
||||
|
||||
## Manywheels (linux pip and libtorch packages)
|
||||
|
||||
Manywheels are pip packages for linux distros. Note that these manywheels are not actually manylinux compliant.
|
||||
|
||||
`builder/manywheel/build_cpu.sh` and `builder/manywheel/build.sh` (for CUDA builds) just set different env vars and then call into `builder/manywheel/build_common.sh`
|
||||
|
||||
The entrypoint file `builder/manywheel/build_common.sh` is really really complicated because
|
||||
|
||||
* This used to handle building for several different python versions at the same time. The loops have been removed, but there's still unnecessary folders and movements here and there.
|
||||
* The script is never used this way anymore. This extra machinery could be removed.
|
||||
* This used to handle testing the pip packages too. This is why there’s testing code at the end that messes with python installations and stuff
|
||||
* The script is never used this way anymore. This extra machinery could be removed.
|
||||
* This also builds libtorch packages
|
||||
* This should really be separate. libtorch packages are c++ only and have no python. They should not share infra with all the python specific stuff in this file.
|
||||
* There is a lot of messing with rpaths. This is necessary, but could be made much much simpler if the above issues were fixed.
|
||||
|
||||
## Wheels (MacOS pip and libtorch packages)
|
||||
|
||||
The entrypoint file `builder/wheel/build_wheel.sh` is complicated because
|
||||
|
||||
* The mac builds used to all run on one machine (we didn’t have autoscaling mac machines till circleci). So this script handled siloing itself by setting-up and tearing-down its build env and siloing itself into its own build directory.
|
||||
* The script is never used this way anymore. This extra machinery could be removed.
|
||||
* This also builds libtorch packages
|
||||
* Ditto the comment above. This should definitely be separated out.
|
||||
|
||||
Note that the MacOS Python wheels are still built in conda environments. Some of the dependencies present during build also come from conda.
|
||||
|
||||
## Windows Wheels (Windows pip and libtorch packages)
|
||||
|
||||
The entrypoint file `builder/windows/build_pytorch.bat` is complicated because
|
||||
|
||||
* This used to handle building for several different python versions at the same time. This is why there are loops everywhere
|
||||
* The script is never used this way anymore. This extra machinery could be removed.
|
||||
* This used to handle testing the pip packages too. This is why there’s testing code at the end that messes with python installations and stuff
|
||||
* The script is never used this way anymore. This extra machinery could be removed.
|
||||
* This also builds libtorch packages
|
||||
* This should really be separate. libtorch packages are c++ only and have no python. They should not share infra with all the python specific stuff in this file.
|
||||
|
||||
Note that the Windows Python wheels are still built in conda environments. Some of the dependencies present during build also come from conda.
|
||||
|
||||
## General notes
|
||||
|
||||
### Note on run_tests.sh, smoke_test.sh, and check_binary.sh
|
||||
|
||||
* These should all be consolidated
|
||||
* These must run on all OS types: MacOS, Linux, and Windows
|
||||
* These all run smoke tests at the moment. They inspect the packages some, maybe run a few import statements. They DO NOT run the python tests nor the cpp tests. The idea is that python tests on master and PR merges will catch all breakages. All these tests have to do is make sure the special binary machinery didn’t mess anything up.
|
||||
* There are separate run_tests.sh and smoke_test.sh because one used to be called by the smoke jobs and one used to be called by the binary test jobs (see circleci structure section above). This is still true actually, but these could be united into a single script that runs these checks, given an installed pytorch package.
|
||||
|
||||
### Note on libtorch
|
||||
|
||||
Libtorch packages are built in the wheel build scripts: manywheel/build_*.sh for linux and build_wheel.sh for mac. There are several things wrong with this
|
||||
|
||||
* It’s confusing. Most of those scripts deal with python specifics.
|
||||
* The extra conditionals everywhere severely complicate the wheel build scripts
|
||||
* The process for building libtorch is different from the official instructions (a plain call to cmake, or a call to a script)
|
||||
|
||||
### Note on docker images / Dockerfiles
|
||||
|
||||
All linux builds occur in docker images. The docker images are
|
||||
|
||||
* pytorch/conda-cuda
|
||||
* Has ALL CUDA versions installed. The script pytorch/builder/conda/switch_cuda_version.sh sets /usr/local/cuda to a symlink to e.g. /usr/local/cuda-10.0 to enable different CUDA builds
|
||||
* Also used for cpu builds
|
||||
* pytorch/manylinux-cuda90
|
||||
* pytorch/manylinux-cuda100
|
||||
* Also used for cpu builds
|
||||
|
||||
The Dockerfiles are available in pytorch/builder, but there is no circleci job or script to build these docker images, and they cannot be run locally (unless you have the correct local packages/paths). Only Soumith can build them right now.
|
||||
|
||||
### General Python
|
||||
|
||||
* This is still a good explanation of python installations https://caffe2.ai/docs/faq.html#why-do-i-get-import-errors-in-python-when-i-try-to-use-caffe2
|
||||
|
||||
# How to manually rebuild the binaries
|
||||
|
||||
tl;dr make a PR that looks like https://github.com/pytorch/pytorch/pull/21159
|
||||
|
||||
Sometimes we want to push a change to master and then rebuild all of today's binaries after that change. As of May 30, 2019 there isn't a way to manually run a workflow in the UI. You can manually re-run a workflow, but it will use the exact same git commits as the first run and will not include any changes. So we have to make a PR and then force circleci to run the binary workflow instead of the normal tests. The above PR is an example of how to do this; essentially you copy-paste the binarybuilds workflow steps into the default workflow steps. If you need to point the builder repo to a different commit then you'd need to change https://github.com/pytorch/pytorch/blob/master/.circleci/scripts/binary_checkout.sh#L42-L45 to checkout what you want.
|
||||
|
||||
## How to test changes to the binaries via .circleci
|
||||
|
||||
Writing PRs that test the binaries is annoying, since the default circleci jobs that run on PRs are not the jobs that you want to run. Likely, changes to the binaries will touch something under .circleci/ and require that .circleci/config.yml be regenerated (.circleci/config.yml controls all .circleci behavior, and is generated using `.circleci/regenerate.sh` in python 3.7). But you also need to manually hardcode the binary jobs that you want to test into the .circleci/config.yml workflow, so you should actually make at least two commits, one for your changes and one to temporarily hardcode jobs. See https://github.com/pytorch/pytorch/pull/22928 as an example of how to do this.
|
||||
|
||||
```sh
|
||||
# Make your changes
|
||||
touch .circleci/verbatim-sources/nightly-binary-build-defaults.yml
|
||||
|
||||
# Regenerate the yaml, has to be in python 3.7
|
||||
.circleci/regenerate.sh
|
||||
|
||||
# Make a commit
|
||||
git add .circleci *
|
||||
git commit -m "My real changes"
|
||||
git push origin my_branch
|
||||
|
||||
# Now hardcode the jobs that you want in the .circleci/config.yml workflows section
|
||||
# Also eliminate ensure-consistency and should_run_job checks
|
||||
# e.g. https://github.com/pytorch/pytorch/commit/2b3344bfed8772fe86e5210cc4ee915dee42b32d
|
||||
|
||||
# Make a commit you won't keep
|
||||
git add .circleci
|
||||
git commit -m "[DO NOT LAND] testing binaries for above changes"
|
||||
git push origin my_branch
|
||||
|
||||
# Now you need to make some changes to the first commit.
|
||||
git rebase -i HEAD~2 # mark the first commit as 'edit'
|
||||
|
||||
# Make the changes
|
||||
touch .circleci/verbatim-sources/nightly-binary-build-defaults.yml
|
||||
.circleci/regenerate.sh
|
||||
|
||||
# Amend the commit and continue the rebase
|
||||
git add .circleci
|
||||
git commit --amend
|
||||
git rebase --continue
|
||||
|
||||
# Update the PR, need to force since the commits are different now
|
||||
git push origin my_branch --force
|
||||
```
|
||||
|
||||
The advantage of this flow is that you can make new changes to the base commit and regenerate the .circleci without having to re-write which binary jobs you want to test on. The downside is that all updates will be force pushes.
|
||||
|
||||
## How to build a binary locally
|
||||
|
||||
### Linux
|
||||
|
||||
You can build Linux binaries locally easily using docker.
|
||||
|
||||
```sh
|
||||
# Run the docker
|
||||
# Use the correct docker image, pytorch/conda-cuda used here as an example
|
||||
#
|
||||
# -v path/to/foo:path/to/bar makes path/to/foo on your local machine (the
|
||||
# machine that you're running the command on) accessible to the docker
|
||||
# container at path/to/bar. So if you then run `touch path/to/bar/baz`
|
||||
# in the docker container then you will see path/to/foo/baz on your local
|
||||
# machine. You could also clone the pytorch and builder repos in the docker.
|
||||
#
|
||||
# If you know how, add ccache as a volume too and speed up everything
|
||||
docker run \
|
||||
-v your/pytorch/repo:/pytorch \
|
||||
-v your/builder/repo:/builder \
|
||||
-v where/you/want/packages/to/appear:/final_pkgs \
|
||||
-it pytorch/conda-cuda /bin/bash
|
||||
|
||||
# Export whatever variables are important to you. All variables that you'd
|
||||
# possibly need are in .circleci/scripts/binary_populate_env.sh
|
||||
# You should probably always export at least these 3 variables
|
||||
export PACKAGE_TYPE=conda
|
||||
export DESIRED_PYTHON=3.6
|
||||
export DESIRED_CUDA=cpu
|
||||
|
||||
# Call the entrypoint
|
||||
# `|& tee foo.log` just copies all stdout and stderr output to foo.log
|
||||
# The builds generate lots of output so you probably need this when
|
||||
# building locally.
|
||||
/builder/conda/build_pytorch.sh |& tee build_output.log
|
||||
```
|
||||
|
||||
**Building CUDA binaries on docker**
|
||||
|
||||
You can build CUDA binaries on CPU only machines, but you can only run CUDA binaries on CUDA machines. This means that you can build a CUDA binary on a docker on your laptop if you so choose (though it’s gonna take a long time).
|
||||
|
||||
For Facebook employees, ask about beefy machines that have docker support and use those instead of your laptop; it will be 5x as fast.
|
||||
|
||||
### MacOS
|
||||
|
||||
There’s no easy way to generate reproducible hermetic MacOS environments. If you have a Mac laptop then you can try emulating the .circleci environments as much as possible, but you probably have packages in /usr/local/, possibly installed by brew, that will probably interfere with the build. If you’re trying to repro an error on a Mac build in .circleci and you can’t seem to repro locally, then my best advice is actually to iterate on .circleci :/
|
||||
|
||||
But if you want to try, then I’d recommend
|
||||
|
||||
```sh
|
||||
# Create a new terminal
|
||||
# Clear your LD_LIBRARY_PATH and trim as much out of your PATH as you
|
||||
# know how to do
|
||||
|
||||
# Install a new miniconda
|
||||
# First remove any other python or conda installation from your PATH
|
||||
# Always install miniconda 3, even if building for Python <3
|
||||
new_conda="~/my_new_conda"
|
||||
conda_sh="$new_conda/install_miniconda.sh"
|
||||
curl -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
|
||||
chmod +x "$conda_sh"
|
||||
"$conda_sh" -b -p "$MINICONDA_ROOT"
|
||||
rm -f "$conda_sh"
|
||||
export PATH="~/my_new_conda/bin:$PATH"
|
||||
|
||||
# Create a clean python env
|
||||
# All MacOS builds use conda to manage the python env and dependencies
|
||||
# that are built with, even the pip packages
|
||||
conda create -yn binary python=2.7
|
||||
conda activate binary
|
||||
|
||||
# Export whatever variables are important to you. All variables that you'd
|
||||
# possibly need are in .circleci/scripts/binary_populate_env.sh
|
||||
# You should probably always export at least these 3 variables
|
||||
export PACKAGE_TYPE=conda
|
||||
export DESIRED_PYTHON=3.6
|
||||
export DESIRED_CUDA=cpu
|
||||
|
||||
# Call the entrypoint you want
|
||||
path/to/builder/wheel/build_wheel.sh
|
||||
```
|
||||
|
||||
N.B. installing a brand new miniconda is important. This has to do with how conda installations work. See the “General Python” section above, but tldr; is that
|
||||
|
||||
1. You make the ‘conda’ command accessible by prepending `path/to/conda_root/bin` to your PATH.
|
||||
2. You make a new env and activate it, which then also gets prepended to your PATH. Now you have `path/to/conda_root/envs/new_env/bin:path/to/conda_root/bin:$PATH`
|
||||
3. Now say you (or some code that you ran) call python executable `foo`
|
||||
1. if you installed `foo` in `new_env`, then `path/to/conda_root/envs/new_env/bin/foo` will get called, as expected.
|
||||
    2. But if you forgot to install `foo` in `new_env` but happened to previously install it in your root conda env (called ‘base’), then unix/linux will still find `path/to/conda_root/bin/foo`. This is dangerous, since `foo` can be a different version than you want; `foo` can even be for an incompatible python version!
|
||||
|
||||
Newer conda versions and proper python hygiene can prevent this, but just install a new miniconda to be safe.
|
||||
|
||||
### Windows
|
||||
|
||||
TODO: fill in
|
||||
@ -31,6 +31,30 @@ def get_processor_arch_name(gpu_version):
|
||||
)
|
||||
|
||||
CONFIG_TREE_DATA = OrderedDict(
|
||||
macos=([None], OrderedDict(
|
||||
wheel=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
conda=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
libtorch=[
|
||||
"3.7",
|
||||
],
|
||||
)),
|
||||
macos_arm64=([None], OrderedDict(
|
||||
wheel=[
|
||||
"3.8",
|
||||
"3.9",
|
||||
],
|
||||
conda=[
|
||||
"3.8",
|
||||
"3.9",
|
||||
],
|
||||
)),
|
||||
windows=(
|
||||
# Stop building Win+CU102, see https://github.com/pytorch/pytorch/issues/65648
|
||||
[v for v in dimensions.GPU_VERSIONS if v not in dimensions.ROCM_VERSION_LABELS and v != "cuda102"],
|
||||
OrderedDict(
|
||||
conda=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
# GCC config variants:
|
||||
|
||||
@ -2,9 +2,9 @@ PHASES = ["build", "test"]
|
||||
|
||||
CUDA_VERSIONS = [
|
||||
"102",
|
||||
"111",
|
||||
"113",
|
||||
"116",
|
||||
"117",
|
||||
"115",
|
||||
]
|
||||
|
||||
ROCM_VERSIONS = [
|
||||
|
||||
@ -71,11 +71,10 @@ class ExperimentalFeatureConfigNode(TreeConfigNode):
|
||||
next_nodes = {
|
||||
"asan": AsanConfigNode,
|
||||
"xla": XlaConfigNode,
|
||||
"mps": MPSConfigNode,
|
||||
"mlc": MLCConfigNode,
|
||||
"vulkan": VulkanConfigNode,
|
||||
"parallel_tbb": ParallelTBBConfigNode,
|
||||
"crossref": CrossRefConfigNode,
|
||||
"dynamo": DynamoConfigNode,
|
||||
"noarch": NoarchConfigNode,
|
||||
"parallel_native": ParallelNativeConfigNode,
|
||||
"onnx": ONNXConfigNode,
|
||||
"libtorch": LibTorchConfigNode,
|
||||
@ -117,12 +116,12 @@ class XlaConfigNode(TreeConfigNode):
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
class MPSConfigNode(TreeConfigNode):
|
||||
class MLCConfigNode(TreeConfigNode):
|
||||
def modify_label(self, label):
|
||||
return "MPS=" + str(label)
|
||||
return "MLC=" + str(label)
|
||||
|
||||
def init2(self, node_name):
|
||||
self.props["is_mps"] = node_name
|
||||
self.props["is_mlc"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
@ -172,17 +171,9 @@ class ParallelTBBConfigNode(TreeConfigNode):
|
||||
return ImportantConfigNode
|
||||
|
||||
|
||||
class CrossRefConfigNode(TreeConfigNode):
|
||||
class NoarchConfigNode(TreeConfigNode):
|
||||
def init2(self, node_name):
|
||||
self.props["is_crossref"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
|
||||
class DynamoConfigNode(TreeConfigNode):
|
||||
def init2(self, node_name):
|
||||
self.props["is_dynamo"] = node_name
|
||||
self.props["is_noarch"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
@ -185,7 +185,7 @@ def gen_docs_configs(xenial_parent_config):
|
||||
HiddenConf(
|
||||
"pytorch_python_doc_build",
|
||||
parent_build=xenial_parent_config,
|
||||
filters=gen_filter_dict(branches_list=["master", "main", "nightly"],
|
||||
filters=gen_filter_dict(branches_list=["master", "nightly"],
|
||||
tags_list=RC_PATTERN),
|
||||
)
|
||||
)
|
||||
@ -201,7 +201,7 @@ def gen_docs_configs(xenial_parent_config):
|
||||
HiddenConf(
|
||||
"pytorch_cpp_doc_build",
|
||||
parent_build=xenial_parent_config,
|
||||
filters=gen_filter_dict(branches_list=["master", "main", "nightly"],
|
||||
filters=gen_filter_dict(branches_list=["master", "nightly"],
|
||||
tags_list=RC_PATTERN),
|
||||
)
|
||||
)
|
||||
@ -239,8 +239,7 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
compiler_version = fc.find_prop("compiler_version")
|
||||
is_xla = fc.find_prop("is_xla") or False
|
||||
is_asan = fc.find_prop("is_asan") or False
|
||||
is_crossref = fc.find_prop("is_crossref") or False
|
||||
is_dynamo = fc.find_prop("is_dynamo") or False
|
||||
is_noarch = fc.find_prop("is_noarch") or False
|
||||
is_onnx = fc.find_prop("is_onnx") or False
|
||||
is_pure_torch = fc.find_prop("is_pure_torch") or False
|
||||
is_vulkan = fc.find_prop("is_vulkan") or False
|
||||
@ -284,11 +283,8 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
python_version = fc.find_prop("pyver")
|
||||
parms_list[0] = fc.find_prop("abbreviated_pyver")
|
||||
|
||||
if is_crossref:
|
||||
parms_list_ignored_for_docker_image.append("crossref")
|
||||
|
||||
if is_dynamo:
|
||||
parms_list_ignored_for_docker_image.append("dynamo")
|
||||
if is_noarch:
|
||||
parms_list_ignored_for_docker_image.append("noarch")
|
||||
|
||||
if is_onnx:
|
||||
parms_list.append("onnx")
|
||||
@ -338,12 +334,13 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
build_only=build_only,
|
||||
)
|
||||
|
||||
# run docs builds on "pytorch-linux-xenial-py3.7-gcc5.4". Docs builds
|
||||
# run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
|
||||
# should run on a CPU-only build that runs on all PRs.
|
||||
# XXX should this be updated to a more modern build?
|
||||
# XXX should this be updated to a more modern build? Projects are
|
||||
# beginning to drop python3.6
|
||||
if (
|
||||
distro_name == "xenial"
|
||||
and fc.find_prop("pyver") == "3.7"
|
||||
and fc.find_prop("pyver") == "3.6"
|
||||
and cuda_version is None
|
||||
and parallel_backend is None
|
||||
and not is_vulkan
|
||||
|
||||
103
.circleci/cimodel/data/simple/android_definitions.py
Normal file
103
.circleci/cimodel/data/simple/android_definitions.py
Normal file
@ -0,0 +1,103 @@
|
||||
import cimodel.data.simple.util.branch_filters as branch_filters
|
||||
from cimodel.data.simple.util.docker_constants import (
|
||||
DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK
|
||||
)
|
||||
|
||||
|
||||
class AndroidJob:
|
||||
def __init__(self,
|
||||
variant,
|
||||
template_name,
|
||||
is_master_only=True):
|
||||
|
||||
self.variant = variant
|
||||
self.template_name = template_name
|
||||
self.is_master_only = is_master_only
|
||||
|
||||
def gen_tree(self):
|
||||
|
||||
base_name_parts = [
|
||||
"pytorch",
|
||||
"linux",
|
||||
"xenial",
|
||||
"py3",
|
||||
"clang5",
|
||||
"android",
|
||||
"ndk",
|
||||
"r19c",
|
||||
] + self.variant + [
|
||||
"build",
|
||||
]
|
||||
|
||||
full_job_name = "_".join(base_name_parts)
|
||||
build_env_name = "-".join(base_name_parts)
|
||||
|
||||
props_dict = {
|
||||
"name": full_job_name,
|
||||
"build_environment": "\"{}\"".format(build_env_name),
|
||||
"docker_image": "\"{}\"".format(DOCKER_IMAGE_NDK),
|
||||
"requires": [DOCKER_REQUIREMENT_NDK]
|
||||
}
|
||||
|
||||
if self.is_master_only:
|
||||
props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
|
||||
|
||||
return [{self.template_name: props_dict}]
|
||||
|
||||
|
||||
class AndroidGradleJob:
|
||||
def __init__(self,
|
||||
job_name,
|
||||
template_name,
|
||||
dependencies,
|
||||
is_master_only=True,
|
||||
is_pr_only=False,
|
||||
extra_props=tuple()):
|
||||
|
||||
self.job_name = job_name
|
||||
self.template_name = template_name
|
||||
self.dependencies = dependencies
|
||||
self.is_master_only = is_master_only
|
||||
self.is_pr_only = is_pr_only
|
||||
self.extra_props = dict(extra_props)
|
||||
|
||||
def gen_tree(self):
|
||||
|
||||
props_dict = {
|
||||
"name": self.job_name,
|
||||
"requires": self.dependencies,
|
||||
}
|
||||
|
||||
if self.is_master_only:
|
||||
props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
|
||||
elif self.is_pr_only:
|
||||
props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.PR_BRANCH_LIST)
|
||||
if self.extra_props:
|
||||
props_dict.update(self.extra_props)
|
||||
|
||||
return [{self.template_name: props_dict}]
|
||||
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
AndroidJob(["x86_32"], "pytorch_linux_build", is_master_only=False),
|
||||
AndroidJob(["x86_64"], "pytorch_linux_build"),
|
||||
AndroidJob(["arm", "v7a"], "pytorch_linux_build"),
|
||||
AndroidJob(["arm", "v8a"], "pytorch_linux_build"),
|
||||
AndroidGradleJob(
|
||||
"pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build-x86_32",
|
||||
"pytorch_android_gradle_build-x86_32",
|
||||
["pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build"],
|
||||
is_master_only=False,
|
||||
is_pr_only=True),
|
||||
AndroidGradleJob(
|
||||
"pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build",
|
||||
"pytorch_android_gradle_build",
|
||||
["pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build",
|
||||
"pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build",
|
||||
"pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build",
|
||||
"pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build"]),
|
||||
]
|
||||
|
||||
|
||||
def get_workflow_jobs():
|
||||
return [item.gen_tree() for item in WORKFLOW_DATA]
|
||||
193
.circleci/cimodel/data/simple/binary_smoketest.py
Normal file
193
.circleci/cimodel/data/simple/binary_smoketest.py
Normal file
@ -0,0 +1,193 @@
|
||||
"""
|
||||
TODO: Refactor circleci/cimodel/data/binary_build_data.py to generate this file
|
||||
instead of doing one offs here
|
||||
Binary builds (subset, to smoke test that they'll work)
|
||||
|
||||
NB: If you modify this file, you need to also modify
|
||||
the binary_and_smoke_tests_on_pr variable in
|
||||
pytorch-ci-hud to adjust the allowed build list
|
||||
at https://github.com/ezyang/pytorch-ci-hud/blob/master/src/BuildHistoryDisplay.js
|
||||
|
||||
Note:
|
||||
This binary build is currently broken, see https://github_com/pytorch/pytorch/issues/16710
|
||||
- binary_linux_conda_3_6_cu90_devtoolset7_build
|
||||
- binary_linux_conda_3_6_cu90_devtoolset7_test
|
||||
|
||||
TODO
|
||||
we should test a libtorch cuda build, but they take too long
|
||||
- binary_linux_libtorch_3_6m_cu90_devtoolset7_static-without-deps_build
|
||||
"""
|
||||
|
||||
import cimodel.lib.miniutils as miniutils
|
||||
import cimodel.data.simple.util.branch_filters
|
||||
|
||||
|
||||
class SmoketestJob:
|
||||
def __init__(self,
|
||||
template_name,
|
||||
build_env_parts,
|
||||
docker_image,
|
||||
job_name,
|
||||
is_master_only=False,
|
||||
requires=None,
|
||||
has_libtorch_variant=False,
|
||||
extra_props=None):
|
||||
|
||||
self.template_name = template_name
|
||||
self.build_env_parts = build_env_parts
|
||||
self.docker_image = docker_image
|
||||
self.job_name = job_name
|
||||
self.is_master_only = is_master_only
|
||||
self.requires = requires or []
|
||||
self.has_libtorch_variant = has_libtorch_variant
|
||||
self.extra_props = extra_props or {}
|
||||
|
||||
def gen_tree(self):
|
||||
|
||||
props_dict = {
|
||||
"build_environment": " ".join(self.build_env_parts),
|
||||
"name": self.job_name,
|
||||
"requires": self.requires,
|
||||
}
|
||||
|
||||
if self.docker_image:
|
||||
props_dict["docker_image"] = self.docker_image
|
||||
|
||||
if self.is_master_only:
|
||||
props_dict["filters"] = cimodel.data.simple.util.branch_filters.gen_filter_dict()
|
||||
|
||||
if self.has_libtorch_variant:
|
||||
props_dict["libtorch_variant"] = "shared-with-deps"
|
||||
|
||||
props_dict.update(self.extra_props)
|
||||
|
||||
return [{self.template_name: props_dict}]
|
||||
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
SmoketestJob(
|
||||
"binary_linux_build",
|
||||
["manywheel", "3.7m", "cu102", "devtoolset7"],
|
||||
"pytorch/manylinux-cuda102",
|
||||
"binary_linux_manywheel_3_7m_cu102_devtoolset7_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_linux_build",
|
||||
["libtorch", "3.7m", "cpu", "devtoolset7"],
|
||||
"pytorch/manylinux-cuda102",
|
||||
"binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build",
|
||||
is_master_only=True,
|
||||
has_libtorch_variant=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_linux_build",
|
||||
["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"],
|
||||
"pytorch/pytorch-binary-docker-image-ubuntu16.04:latest",
|
||||
"binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build",
|
||||
is_master_only=False,
|
||||
has_libtorch_variant=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_mac_build",
|
||||
["wheel", "3.7", "cpu"],
|
||||
None,
|
||||
"binary_macos_wheel_3_7_cpu_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
# This job has an average run time of 3 hours o.O
|
||||
# Now only running this on master to reduce overhead
|
||||
SmoketestJob(
|
||||
"binary_mac_build",
|
||||
["libtorch", "3.7", "cpu"],
|
||||
None,
|
||||
"binary_macos_libtorch_3_7_cpu_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_build",
|
||||
["libtorch", "3.7", "cpu", "debug"],
|
||||
None,
|
||||
"binary_windows_libtorch_3_7_cpu_debug_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_build",
|
||||
["libtorch", "3.7", "cpu", "release"],
|
||||
None,
|
||||
"binary_windows_libtorch_3_7_cpu_release_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_build",
|
||||
["wheel", "3.7", "cu113"],
|
||||
None,
|
||||
"binary_windows_wheel_3_7_cu113_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
|
||||
SmoketestJob(
|
||||
"binary_windows_test",
|
||||
["libtorch", "3.7", "cpu", "debug"],
|
||||
None,
|
||||
"binary_windows_libtorch_3_7_cpu_debug_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_windows_libtorch_3_7_cpu_debug_build"],
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_test",
|
||||
["libtorch", "3.7", "cpu", "release"],
|
||||
None,
|
||||
"binary_windows_libtorch_3_7_cpu_release_test",
|
||||
is_master_only=False,
|
||||
requires=["binary_windows_libtorch_3_7_cpu_release_build"],
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_test",
|
||||
["wheel", "3.7", "cu113"],
|
||||
None,
|
||||
"binary_windows_wheel_3_7_cu113_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_windows_wheel_3_7_cu113_build"],
|
||||
extra_props={
|
||||
"executor": "windows-with-nvidia-gpu",
|
||||
},
|
||||
),
|
||||
|
||||
|
||||
|
||||
SmoketestJob(
|
||||
"binary_linux_test",
|
||||
["manywheel", "3.7m", "cu102", "devtoolset7"],
|
||||
"pytorch/manylinux-cuda102",
|
||||
"binary_linux_manywheel_3_7m_cu102_devtoolset7_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_linux_manywheel_3_7m_cu102_devtoolset7_build"],
|
||||
extra_props={
|
||||
"resource_class": "gpu.medium",
|
||||
"use_cuda_docker_runtime": miniutils.quote((str(1))),
|
||||
},
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_linux_test",
|
||||
["libtorch", "3.7m", "cpu", "devtoolset7"],
|
||||
"pytorch/manylinux-cuda102",
|
||||
"binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build"],
|
||||
has_libtorch_variant=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_linux_test",
|
||||
["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"],
|
||||
"pytorch/pytorch-binary-docker-image-ubuntu16.04:latest",
|
||||
"binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build"],
|
||||
has_libtorch_variant=True,
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def get_workflow_jobs():
|
||||
return [item.gen_tree() for item in WORKFLOW_DATA]
|
||||
@ -26,7 +26,7 @@ def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
|
||||
"name": quote(f"docker-{image_name}"),
|
||||
"image_name": quote(image_name),
|
||||
})
|
||||
if image_name == "pytorch-linux-xenial-py3.7-gcc5.4":
|
||||
if image_name == "pytorch-linux-xenial-py3.6-gcc5.4":
|
||||
# pushing documentation on tags requires CircleCI to also
|
||||
# build all the dependencies on tags, including this docker image
|
||||
parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
from cimodel.data.simple.util.versions import MultiPartVersion
|
||||
from cimodel.data.simple.util.branch_filters import gen_filter_dict_exclude
|
||||
import cimodel.lib.miniutils as miniutils
|
||||
|
||||
XCODE_VERSION = MultiPartVersion([12, 5, 1])
|
||||
@ -12,7 +11,7 @@ class ArchVariant:
|
||||
|
||||
def render(self):
|
||||
extra_parts = [self.custom_build_name] if len(self.custom_build_name) > 0 else []
|
||||
return "-".join([self.name] + extra_parts).replace("_", "-")
|
||||
return "_".join([self.name] + extra_parts)
|
||||
|
||||
|
||||
def get_platform(arch_variant_name):
|
||||
@ -26,25 +25,30 @@ class IOSJob:
|
||||
self.is_org_member_context = is_org_member_context
|
||||
self.extra_props = extra_props
|
||||
|
||||
def gen_name_parts(self):
|
||||
version_parts = self.xcode_version.render_dots_or_parts("-")
|
||||
build_variant_suffix = self.arch_variant.render()
|
||||
def gen_name_parts(self, with_version_dots):
|
||||
|
||||
version_parts = self.xcode_version.render_dots_or_parts(with_version_dots)
|
||||
build_variant_suffix = "_".join([self.arch_variant.render(), "build"])
|
||||
|
||||
return [
|
||||
"pytorch",
|
||||
"ios",
|
||||
] + version_parts + [
|
||||
build_variant_suffix,
|
||||
]
|
||||
|
||||
def gen_job_name(self):
|
||||
return "-".join(self.gen_name_parts())
|
||||
return "_".join(self.gen_name_parts(False))
|
||||
|
||||
def gen_tree(self):
|
||||
|
||||
platform_name = get_platform(self.arch_variant.name)
|
||||
|
||||
props_dict = {
|
||||
"name": self.gen_job_name(),
|
||||
"build_environment": self.gen_job_name(),
|
||||
"build_environment": "-".join(self.gen_name_parts(True)),
|
||||
"ios_arch": self.arch_variant.name,
|
||||
"ios_platform": platform_name,
|
||||
"name": self.gen_job_name(),
|
||||
}
|
||||
|
||||
if self.is_org_member_context:
|
||||
@ -53,28 +57,30 @@ class IOSJob:
|
||||
if self.extra_props:
|
||||
props_dict.update(self.extra_props)
|
||||
|
||||
props_dict["filters"] = gen_filter_dict_exclude()
|
||||
|
||||
return [{"pytorch_ios_build": props_dict}]
|
||||
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
IOSJob(XCODE_VERSION, ArchVariant("x86_64"), is_org_member_context=False, extra_props={
|
||||
"lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
# IOSJob(XCODE_VERSION, ArchVariant("arm64"), extra_props={
|
||||
# "lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
# IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={
|
||||
# "use_metal": miniutils.quote(str(int(True))),
|
||||
# "lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
# IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom-ops"), extra_props={
|
||||
# "op_list": "mobilenetv2.yaml",
|
||||
# "lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
IOSJob(XCODE_VERSION, ArchVariant("x86_64", "full_jit"), is_org_member_context=False, extra_props={
|
||||
"lite_interpreter": miniutils.quote(str(int(False)))}),
|
||||
IOSJob(XCODE_VERSION, ArchVariant("arm64"), extra_props={
|
||||
"lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={
|
||||
"use_metal": miniutils.quote(str(int(True))),
|
||||
"lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
IOSJob(XCODE_VERSION, ArchVariant("arm64", "full_jit"), extra_props={
|
||||
"lite_interpreter": miniutils.quote(str(int(False)))}),
|
||||
IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom"), extra_props={
|
||||
"op_list": "mobilenetv2.yaml",
|
||||
"lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
IOSJob(XCODE_VERSION, ArchVariant("x86_64", "coreml"), is_org_member_context=False, extra_props={
|
||||
"use_coreml": miniutils.quote(str(int(True))),
|
||||
"lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
# IOSJob(XCODE_VERSION, ArchVariant("arm64", "coreml"), extra_props={
|
||||
# "use_coreml": miniutils.quote(str(int(True))),
|
||||
# "lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
IOSJob(XCODE_VERSION, ArchVariant("arm64", "coreml"), extra_props={
|
||||
"use_coreml": miniutils.quote(str(int(True))),
|
||||
"lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
]
|
||||
|
||||
|
||||
|
||||
@ -1,8 +1,3 @@
|
||||
from collections import OrderedDict
|
||||
from cimodel.lib.miniutils import quote
|
||||
from cimodel.data.simple.util.branch_filters import gen_filter_dict_exclude
|
||||
|
||||
|
||||
class MacOsJob:
|
||||
def __init__(self, os_version, is_build=False, is_test=False, extra_props=tuple()):
|
||||
# extra_props is tuple type, because mutable data structures for argument defaults
|
||||
@ -16,14 +11,10 @@ class MacOsJob:
|
||||
non_phase_parts = ["pytorch", "macos", self.os_version, "py3"]
|
||||
|
||||
extra_name_list = [name for name, exist in self.extra_props.items() if exist]
|
||||
full_job_name_list = (
|
||||
non_phase_parts
|
||||
+ extra_name_list
|
||||
+ [
|
||||
"build" if self.is_build else None,
|
||||
"test" if self.is_test else None,
|
||||
]
|
||||
)
|
||||
full_job_name_list = non_phase_parts + extra_name_list + [
|
||||
'build' if self.is_build else None,
|
||||
'test' if self.is_test else None,
|
||||
]
|
||||
|
||||
full_job_name = "_".join(list(filter(None, full_job_name_list)))
|
||||
|
||||
@ -50,99 +41,12 @@ WORKFLOW_DATA = [
|
||||
"10_13",
|
||||
is_build=True,
|
||||
is_test=True,
|
||||
extra_props=tuple({"lite_interpreter": True}.items()),
|
||||
),
|
||||
extra_props=tuple({
|
||||
"lite_interpreter": True
|
||||
}.items()),
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
def get_new_workflow_jobs():
|
||||
return [
|
||||
OrderedDict(
|
||||
{
|
||||
"mac_build": OrderedDict(
|
||||
{
|
||||
"name": "macos-12-py3-x86-64-build",
|
||||
"build-environment": "macos-12-py3-x86-64",
|
||||
"xcode-version": quote("13.3.1"),
|
||||
"filters": gen_filter_dict_exclude()
|
||||
}
|
||||
)
|
||||
}
|
||||
),
|
||||
OrderedDict(
|
||||
{
|
||||
"mac_test": OrderedDict(
|
||||
{
|
||||
"name": "macos-12-py3-x86-64-test-1-2-default",
|
||||
"build-environment": "macos-12-py3-x86-64",
|
||||
"xcode-version": quote("13.3.1"),
|
||||
"shard-number": quote("1"),
|
||||
"num-test-shards": quote("2"),
|
||||
"requires": ["macos-12-py3-x86-64-build"],
|
||||
"filters": gen_filter_dict_exclude()
|
||||
}
|
||||
)
|
||||
}
|
||||
),
|
||||
OrderedDict(
|
||||
{
|
||||
"mac_test": OrderedDict(
|
||||
{
|
||||
"name": "macos-12-py3-x86-64-test-2-2-default",
|
||||
"build-environment": "macos-12-py3-x86-64",
|
||||
"xcode-version": quote("13.3.1"),
|
||||
"shard-number": quote("2"),
|
||||
"num-test-shards": quote("2"),
|
||||
"requires": ["macos-12-py3-x86-64-build"],
|
||||
"filters": gen_filter_dict_exclude()
|
||||
}
|
||||
)
|
||||
}
|
||||
),
|
||||
OrderedDict(
|
||||
{
|
||||
"mac_test": OrderedDict(
|
||||
{
|
||||
"name": "macos-12-py3-x86-64-test-1-1-functorch",
|
||||
"build-environment": "macos-12-py3-x86-64",
|
||||
"xcode-version": quote("13.3.1"),
|
||||
"shard-number": quote("1"),
|
||||
"num-test-shards": quote("1"),
|
||||
"test-config": "functorch",
|
||||
"requires": ["macos-12-py3-x86-64-build"],
|
||||
"filters": gen_filter_dict_exclude()
|
||||
}
|
||||
)
|
||||
}
|
||||
),
|
||||
OrderedDict(
|
||||
{
|
||||
"mac_build": OrderedDict(
|
||||
{
|
||||
"name": "macos-12-py3-x86-64-lite-interpreter-build-test",
|
||||
"build-environment": "macos-12-py3-lite-interpreter-x86-64",
|
||||
"xcode-version": quote("13.3.1"),
|
||||
"build-generates-artifacts": "false",
|
||||
"filters": gen_filter_dict_exclude()
|
||||
}
|
||||
)
|
||||
}
|
||||
),
|
||||
OrderedDict(
|
||||
{
|
||||
"mac_build": OrderedDict(
|
||||
{
|
||||
"name": "macos-12-py3-arm64-build",
|
||||
"build-environment": "macos-12-py3-arm64",
|
||||
"xcode-version": quote("13.3.1"),
|
||||
"python-version": quote("3.9.12"),
|
||||
"filters": gen_filter_dict_exclude()
|
||||
}
|
||||
)
|
||||
}
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def get_workflow_jobs():
|
||||
return [item.gen_tree() for item in WORKFLOW_DATA]
|
||||
|
||||
77
.circleci/cimodel/data/simple/nightly_android.py
Normal file
77
.circleci/cimodel/data/simple/nightly_android.py
Normal file
@ -0,0 +1,77 @@
|
||||
from cimodel.data.simple.util.docker_constants import (
|
||||
DOCKER_IMAGE_NDK,
|
||||
DOCKER_REQUIREMENT_NDK
|
||||
)
|
||||
|
||||
|
||||
class AndroidNightlyJob:
|
||||
def __init__(self,
|
||||
variant,
|
||||
template_name,
|
||||
extra_props=None,
|
||||
with_docker=True,
|
||||
requires=None,
|
||||
no_build_suffix=False):
|
||||
|
||||
self.variant = variant
|
||||
self.template_name = template_name
|
||||
self.extra_props = extra_props or {}
|
||||
self.with_docker = with_docker
|
||||
self.requires = requires
|
||||
self.no_build_suffix = no_build_suffix
|
||||
|
||||
def gen_tree(self):
|
||||
|
||||
base_name_parts = [
|
||||
"pytorch",
|
||||
"linux",
|
||||
"xenial",
|
||||
"py3",
|
||||
"clang5",
|
||||
"android",
|
||||
"ndk",
|
||||
"r19c",
|
||||
] + self.variant
|
||||
|
||||
build_suffix = [] if self.no_build_suffix else ["build"]
|
||||
full_job_name = "_".join(["nightly"] + base_name_parts + build_suffix)
|
||||
build_env_name = "-".join(base_name_parts)
|
||||
|
||||
props_dict = {
|
||||
"name": full_job_name,
|
||||
"requires": self.requires,
|
||||
"filters": {"branches": {"only": "nightly"}},
|
||||
}
|
||||
|
||||
props_dict.update(self.extra_props)
|
||||
|
||||
if self.with_docker:
|
||||
props_dict["docker_image"] = DOCKER_IMAGE_NDK
|
||||
props_dict["build_environment"] = build_env_name
|
||||
|
||||
return [{self.template_name: props_dict}]
|
||||
|
||||
BASE_REQUIRES = [DOCKER_REQUIREMENT_NDK]
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
AndroidNightlyJob(["x86_32"], "pytorch_linux_build", requires=BASE_REQUIRES),
|
||||
AndroidNightlyJob(["x86_64"], "pytorch_linux_build", requires=BASE_REQUIRES),
|
||||
AndroidNightlyJob(["arm", "v7a"], "pytorch_linux_build", requires=BASE_REQUIRES),
|
||||
AndroidNightlyJob(["arm", "v8a"], "pytorch_linux_build", requires=BASE_REQUIRES),
|
||||
AndroidNightlyJob(["android_gradle"], "pytorch_android_gradle_build",
|
||||
with_docker=False,
|
||||
requires=[
|
||||
"nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build",
|
||||
"nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build",
|
||||
"nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build",
|
||||
"nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build"]),
|
||||
AndroidNightlyJob(["x86_32_android_publish_snapshot"], "pytorch_android_publish_snapshot",
|
||||
extra_props={"context": "org-member"},
|
||||
with_docker=False,
|
||||
requires=["nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_android_gradle_build"],
|
||||
no_build_suffix=True),
|
||||
]
|
||||
|
||||
|
||||
def get_workflow_jobs():
|
||||
return [item.gen_tree() for item in WORKFLOW_DATA]
|
||||
@ -15,7 +15,7 @@ class IOSNightlyJob:
|
||||
def get_phase_name(self):
|
||||
return "upload" if self.is_upload else "build"
|
||||
|
||||
def get_common_name_pieces(self, sep):
|
||||
def get_common_name_pieces(self, with_version_dots):
|
||||
|
||||
extra_name_suffix = [self.get_phase_name()] if self.is_upload else []
|
||||
|
||||
@ -24,7 +24,7 @@ class IOSNightlyJob:
|
||||
common_name_pieces = [
|
||||
"ios",
|
||||
] + extra_name + [
|
||||
] + ios_definitions.XCODE_VERSION.render_dots_or_parts(sep) + [
|
||||
] + ios_definitions.XCODE_VERSION.render_dots_or_parts(with_version_dots) + [
|
||||
"nightly",
|
||||
self.variant,
|
||||
"build",
|
||||
@ -33,14 +33,14 @@ class IOSNightlyJob:
|
||||
return common_name_pieces
|
||||
|
||||
def gen_job_name(self):
|
||||
return "_".join(["pytorch"] + self.get_common_name_pieces(None))
|
||||
return "_".join(["pytorch"] + self.get_common_name_pieces(False))
|
||||
|
||||
def gen_tree(self):
|
||||
build_configs = BUILD_CONFIGS_FULL_JIT if self.is_full_jit else BUILD_CONFIGS
|
||||
extra_requires = [x.gen_job_name() for x in build_configs] if self.is_upload else []
|
||||
|
||||
props_dict = {
|
||||
"build_environment": "-".join(["libtorch"] + self.get_common_name_pieces(".")),
|
||||
"build_environment": "-".join(["libtorch"] + self.get_common_name_pieces(True)),
|
||||
"requires": extra_requires,
|
||||
"context": "org-member",
|
||||
"filters": {"branches": {"only": "nightly"}},
|
||||
|
||||
@ -1,22 +0,0 @@
|
||||
from typing import OrderedDict
|
||||
from cimodel.data.simple.util.branch_filters import gen_filter_dict_exclude
|
||||
|
||||
|
||||
def get_workflow_job():
|
||||
return [
|
||||
OrderedDict(
|
||||
{
|
||||
"upload_test_stats": OrderedDict(
|
||||
{
|
||||
"name": "upload test status",
|
||||
"requires": [
|
||||
"macos-12-py3-x86-64-test-1-2-default",
|
||||
"macos-12-py3-x86-64-test-2-2-default",
|
||||
"macos-12-py3-x86-64-test-1-1-functorch",
|
||||
],
|
||||
"filters": gen_filter_dict_exclude()
|
||||
}
|
||||
)
|
||||
}
|
||||
),
|
||||
]
|
||||
@ -1,5 +1,4 @@
|
||||
NON_PR_BRANCH_LIST = [
|
||||
"main",
|
||||
"master",
|
||||
r"/ci-all\/.*/",
|
||||
r"/release\/.*/",
|
||||
@ -12,9 +11,6 @@ PR_BRANCH_LIST = [
|
||||
|
||||
RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
|
||||
|
||||
MAC_IOS_EXCLUSION_LIST = ["nightly", "postnightly"]
|
||||
|
||||
|
||||
def gen_filter_dict(
|
||||
branches_list=NON_PR_BRANCH_LIST,
|
||||
tags_list=None
|
||||
@ -29,11 +25,3 @@ def gen_filter_dict(
|
||||
if tags_list is not None:
|
||||
filter_dict["tags"] = {"only": tags_list}
|
||||
return filter_dict
|
||||
|
||||
|
||||
def gen_filter_dict_exclude(branches_list=MAC_IOS_EXCLUSION_LIST):
|
||||
return {
|
||||
"branches": {
|
||||
"ignore": branches_list,
|
||||
},
|
||||
}
|
||||
|
||||
@ -1,6 +1,3 @@
|
||||
from typing import Optional
|
||||
|
||||
|
||||
class MultiPartVersion:
|
||||
def __init__(self, parts, prefix=""):
|
||||
self.parts = parts
|
||||
@ -16,11 +13,14 @@ class MultiPartVersion:
|
||||
else:
|
||||
return [self.prefix]
|
||||
|
||||
def render_dots_or_parts(self, sep: Optional[str] = None):
|
||||
if sep is None:
|
||||
return self.prefixed_parts()
|
||||
def render_dots(self):
|
||||
return ".".join(self.prefixed_parts())
|
||||
|
||||
def render_dots_or_parts(self, with_dots):
|
||||
if with_dots:
|
||||
return [self.render_dots()]
|
||||
else:
|
||||
return [sep.join(self.prefixed_parts())]
|
||||
return self.prefixed_parts()
|
||||
|
||||
|
||||
class CudaVersion(MultiPartVersion):
|
||||
|
||||
2628
.circleci/config.yml
generated
2628
.circleci/config.yml
generated
File diff suppressed because it is too large
Load Diff
@ -53,7 +53,7 @@ dependencies {
|
||||
implementation 'androidx.appcompat:appcompat:1.0.0'
|
||||
implementation 'com.facebook.fbjni:fbjni-java-only:0.2.2'
|
||||
implementation 'com.google.code.findbugs:jsr305:3.0.1'
|
||||
implementation 'com.facebook.soloader:nativeloader:0.10.4'
|
||||
implementation 'com.facebook.soloader:nativeloader:0.10.1'
|
||||
|
||||
implementation 'junit:junit:' + rootProject.junitVersion
|
||||
implementation 'androidx.test:core:' + rootProject.coreVersion
|
||||
|
||||
@ -54,8 +54,6 @@ elif [[ "$image" == *-bionic* ]]; then
|
||||
UBUNTU_VERSION=18.04
|
||||
elif [[ "$image" == *-focal* ]]; then
|
||||
UBUNTU_VERSION=20.04
|
||||
elif [[ "$image" == *-jammy* ]]; then
|
||||
UBUNTU_VERSION=22.04
|
||||
elif [[ "$image" == *ubuntu* ]]; then
|
||||
extract_version_from_image_name ubuntu UBUNTU_VERSION
|
||||
elif [[ "$image" == *centos* ]]; then
|
||||
@ -72,20 +70,13 @@ else
|
||||
fi
|
||||
|
||||
DOCKERFILE="${OS}/Dockerfile"
|
||||
# When using ubuntu - 22.04, start from Ubuntu docker image, instead of nvidia/cuda docker image.
|
||||
if [[ "$image" == *cuda* && "$UBUNTU_VERSION" != "22.04" ]]; then
|
||||
if [[ "$image" == *cuda* ]]; then
|
||||
DOCKERFILE="${OS}-cuda/Dockerfile"
|
||||
elif [[ "$image" == *rocm* ]]; then
|
||||
DOCKERFILE="${OS}-rocm/Dockerfile"
|
||||
fi
|
||||
|
||||
if [[ "$image" == *xenial* ]] || [[ "$image" == *bionic* ]]; then
|
||||
CMAKE_VERSION=3.13.5
|
||||
fi
|
||||
|
||||
TRAVIS_DL_URL_PREFIX="https://s3.amazonaws.com/travis-python-archives/binaries/ubuntu/14.04/x86_64"
|
||||
_UCX_COMMIT=31e74cac7bee0ef66bef2af72e7d86d9c282e5ab
|
||||
_UCC_COMMIT=12944da33f911daf505d9bbc51411233d0ed85e1
|
||||
|
||||
# It's annoying to rename jobs every time you want to rewrite a
|
||||
# configuration, so we hardcode everything here rather than do it
|
||||
@ -93,16 +84,28 @@ _UCC_COMMIT=12944da33f911daf505d9bbc51411233d0ed85e1
|
||||
case "$image" in
|
||||
pytorch-linux-xenial-py3.8)
|
||||
ANACONDA_PYTHON_VERSION=3.8
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py3.7-gcc5.4)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=5
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3.7-gcc7.2)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py3.7-gcc7)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
@ -112,6 +115,18 @@ case "$image" in
|
||||
CUDA_VERSION=10.2
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7)
|
||||
CUDA_VERSION=11.1
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
@ -123,50 +138,28 @@ case "$image" in
|
||||
CUDNN_VERSION=8
|
||||
TENSORRT_VERSION=8.0.1.6
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-bionic-cuda11.3-cudnn8-py3-clang9)
|
||||
CUDA_VERSION=11.3.0 # Deviating from major.minor to conform to nvidia's Docker image names
|
||||
pytorch-linux-bionic-cuda11.5-cudnn8-py3-gcc7)
|
||||
CUDA_VERSION=11.5.0
|
||||
CUDNN_VERSION=8
|
||||
TENSORRT_VERSION=8.0.1.6
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CLANG_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7)
|
||||
CUDA_VERSION=11.6.2
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
UCX_COMMIT=${_UCX_COMMIT}
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
;;
|
||||
pytorch-linux-bionic-cuda11.7-cudnn8-py3-gcc7)
|
||||
CUDA_VERSION=11.7.0
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.10
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
UCX_COMMIT=${_UCX_COMMIT}
|
||||
UCC_COMMIT=${_UCC_COMMIT}
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang5-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CLANG_VERSION=5.0
|
||||
CMAKE_VERSION=3.13.5
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
@ -174,13 +167,7 @@ case "$image" in
|
||||
pytorch-linux-xenial-py3-clang7-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CLANG_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-focal-py3-clang7-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CLANG_VERSION=7
|
||||
CMAKE_VERSION=3.10.3
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
@ -188,13 +175,7 @@ case "$image" in
|
||||
pytorch-linux-xenial-py3-clang7-onnx)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CLANG_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-focal-py3-clang10-onnx)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CLANG_VERSION=10
|
||||
CMAKE_VERSION=3.10.3
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
@ -202,6 +183,7 @@ case "$image" in
|
||||
pytorch-linux-xenial-py3-clang5-android-ndk-r19c)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CLANG_VERSION=5.0
|
||||
CMAKE_VERSION=3.13.5
|
||||
LLVMDEV=yes
|
||||
PROTOBUF=yes
|
||||
ANDROID=yes
|
||||
@ -211,6 +193,7 @@ case "$image" in
|
||||
;;
|
||||
pytorch-linux-xenial-py3.7-clang7)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CMAKE_VERSION=3.10.3
|
||||
CLANG_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
@ -250,48 +233,31 @@ case "$image" in
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-focal-rocm5.1-py3.7)
|
||||
pytorch-linux-bionic-cuda11.0-cudnn8-py3.7-gcc9)
|
||||
CUDA_VERSION=11.0
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
ROCM_VERSION=5.1.1
|
||||
ROCM_VERSION=3.9
|
||||
;;
|
||||
pytorch-linux-focal-rocm5.2-py3.7)
|
||||
pytorch-linux-bionic-rocm4.3.1-py3.7)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
ROCM_VERSION=5.2
|
||||
ROCM_VERSION=4.3.1
|
||||
;;
|
||||
pytorch-linux-focal-py3.7-gcc7)
|
||||
pytorch-linux-bionic-rocm4.5-py3.7)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CMAKE_VERSION=3.16.9 # Required for precompiled header support
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda11.6-cudnn8-py3.8-clang12)
|
||||
ANACONDA_PYTHON_VERSION=3.8
|
||||
CUDA_VERSION=11.6
|
||||
CUDNN_VERSION=8
|
||||
CLANG_VERSION=12
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-jammy-cuda11.7-cudnn8-py3.8-clang12)
|
||||
ANACONDA_PYTHON_VERSION=3.8
|
||||
CUDA_VERSION=11.7
|
||||
CUDNN_VERSION=8
|
||||
CLANG_VERSION=12
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
ROCM_VERSION=4.5.2
|
||||
;;
|
||||
*)
|
||||
# Catch-all for builds that are not hardcoded.
|
||||
@ -299,6 +265,9 @@ case "$image" in
|
||||
DB=yes
|
||||
VISION=yes
|
||||
echo "image '$image' did not match an existing build configuration"
|
||||
if [[ "$image" == *xenial* ]]; then
|
||||
CMAKE_VERSION=3.10.3
|
||||
fi
|
||||
if [[ "$image" == *py* ]]; then
|
||||
extract_version_from_image_name py ANACONDA_PYTHON_VERSION
|
||||
fi
|
||||
@ -335,13 +304,6 @@ fi
|
||||
|
||||
tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
#when using cudnn version 8 install it separately from cuda
|
||||
if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
|
||||
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
|
||||
if [[ ${CUDNN_VERSION} == 8 ]]; then
|
||||
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build image
|
||||
# TODO: build-arg THRIFT is not turned on for any image, remove it once we confirm
|
||||
@ -379,10 +341,7 @@ docker build \
|
||||
--build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
|
||||
--build-arg "KATEX=${KATEX:-}" \
|
||||
--build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
|
||||
--build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx906}" \
|
||||
--build-arg "IMAGE_NAME=${IMAGE_NAME}" \
|
||||
--build-arg "UCX_COMMIT=${UCX_COMMIT}" \
|
||||
--build-arg "UCC_COMMIT=${UCC_COMMIT}" \
|
||||
--build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx900;gfx906}" \
|
||||
-f $(dirname ${DOCKERFILE})/Dockerfile \
|
||||
-t "$tmp_tag" \
|
||||
"$@" \
|
||||
|
||||
@ -18,7 +18,6 @@ tag="${DOCKER_TAG}"
|
||||
|
||||
registry="308535385114.dkr.ecr.us-east-1.amazonaws.com"
|
||||
image="${registry}/pytorch/${IMAGE_NAME}"
|
||||
ghcr_image="ghcr.io/pytorch/ci-image"
|
||||
|
||||
login() {
|
||||
aws ecr get-authorization-token --region us-east-1 --output text --query 'authorizationData[].authorizationToken' |
|
||||
@ -47,22 +46,7 @@ fi
|
||||
# Build new image
|
||||
./build.sh ${IMAGE_NAME} -t "${image}:${tag}"
|
||||
|
||||
# Only push if `DOCKER_SKIP_PUSH` = false
|
||||
if [ "${DOCKER_SKIP_PUSH:-true}" = "false" ]; then
|
||||
# Only push if docker image doesn't exist already.
|
||||
# ECR image tags are immutable so this will avoid pushing if only just testing if the docker jobs work
|
||||
# NOTE: The only workflow that should push these images should be the docker-builds.yml workflow
|
||||
if ! docker manifest inspect "${image}:${tag}" >/dev/null 2>/dev/null; then
|
||||
docker push "${image}:${tag}"
|
||||
fi
|
||||
|
||||
if [ "${PUSH_GHCR_IMAGE:-}" = "true" ]; then
|
||||
# Push docker image to the ghcr.io
|
||||
echo $GHCR_PAT | docker login ghcr.io -u pytorch --password-stdin
|
||||
docker tag "${image}:${tag}" "${ghcr_image}:${IMAGE_NAME}-${tag}"
|
||||
docker push "${ghcr_image}:${IMAGE_NAME}-${tag}"
|
||||
fi
|
||||
fi
|
||||
docker push "${image}:${tag}"
|
||||
|
||||
if [ -z "${DOCKER_SKIP_S3_UPLOAD:-}" ]; then
|
||||
trap "rm -rf ${IMAGE_NAME}:${tag}.tar" EXIT
|
||||
|
||||
@ -12,7 +12,7 @@ ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
ARG EC2
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
ADD ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Update CentOS git version
|
||||
@ -23,57 +23,52 @@ RUN yum install -y git
|
||||
|
||||
# Install devtoolset
|
||||
ARG DEVTOOLSET_VERSION
|
||||
COPY ./common/install_devtoolset.sh install_devtoolset.sh
|
||||
ADD ./common/install_devtoolset.sh install_devtoolset.sh
|
||||
RUN bash ./install_devtoolset.sh && rm install_devtoolset.sh
|
||||
ENV BASH_ENV "/etc/profile"
|
||||
|
||||
# (optional) Install non-default glibc version
|
||||
ARG GLIBC_VERSION
|
||||
COPY ./common/install_glibc.sh install_glibc.sh
|
||||
ADD ./common/install_glibc.sh install_glibc.sh
|
||||
RUN if [ -n "${GLIBC_VERSION}" ]; then bash ./install_glibc.sh; fi
|
||||
RUN rm install_glibc.sh
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
ADD ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
ADD ./common/install_conda.sh install_conda.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh
|
||||
RUN rm /opt/conda/requirements-ci.txt
|
||||
|
||||
# (optional) Install protobuf for ONNX
|
||||
ARG PROTOBUF
|
||||
COPY ./common/install_protobuf.sh install_protobuf.sh
|
||||
ADD ./common/install_protobuf.sh install_protobuf.sh
|
||||
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
|
||||
RUN rm install_protobuf.sh
|
||||
ENV INSTALLED_PROTOBUF ${PROTOBUF}
|
||||
|
||||
# (optional) Install database packages like LMDB and LevelDB
|
||||
ARG DB
|
||||
COPY ./common/install_db.sh install_db.sh
|
||||
ADD ./common/install_db.sh install_db.sh
|
||||
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
|
||||
RUN rm install_db.sh
|
||||
ENV INSTALLED_DB ${DB}
|
||||
|
||||
# (optional) Install vision packages like OpenCV and ffmpeg
|
||||
ARG VISION
|
||||
COPY ./common/install_vision.sh install_vision.sh
|
||||
ADD ./common/install_vision.sh install_vision.sh
|
||||
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
|
||||
RUN rm install_vision.sh
|
||||
ENV INSTALLED_VISION ${VISION}
|
||||
|
||||
# Install rocm
|
||||
ARG ROCM_VERSION
|
||||
COPY ./common/install_rocm.sh install_rocm.sh
|
||||
ADD ./common/install_rocm.sh install_rocm.sh
|
||||
RUN bash ./install_rocm.sh
|
||||
RUN rm install_rocm.sh
|
||||
COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
|
||||
RUN bash ./install_rocm_magma.sh
|
||||
RUN rm install_rocm_magma.sh
|
||||
ENV PATH /opt/rocm/bin:$PATH
|
||||
ENV PATH /opt/rocm/hcc/bin:$PATH
|
||||
ENV PATH /opt/rocm/hip/bin:$PATH
|
||||
@ -85,18 +80,18 @@ ENV LC_ALL en_US.utf8
|
||||
|
||||
# (optional) Install non-default CMake version
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./common/install_cmake.sh install_cmake.sh
|
||||
ADD ./common/install_cmake.sh install_cmake.sh
|
||||
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
|
||||
RUN rm install_cmake.sh
|
||||
|
||||
# (optional) Install non-default Ninja version
|
||||
ARG NINJA_VERSION
|
||||
COPY ./common/install_ninja.sh install_ninja.sh
|
||||
ADD ./common/install_ninja.sh install_ninja.sh
|
||||
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
|
||||
RUN rm install_ninja.sh
|
||||
|
||||
# Install ccache/sccache (do this last, so we get priority in PATH)
|
||||
COPY ./common/install_cache.sh install_cache.sh
|
||||
ADD ./common/install_cache.sh install_cache.sh
|
||||
ENV PATH /opt/cache/bin:$PATH
|
||||
RUN bash ./install_cache.sh && rm install_cache.sh
|
||||
|
||||
|
||||
@ -15,37 +15,19 @@ install_ubuntu() {
|
||||
elif [[ "$UBUNTU_VERSION" == "20.04"* ]]; then
|
||||
cmake3="cmake=3.16*"
|
||||
maybe_libiomp_dev=""
|
||||
elif [[ "$UBUNTU_VERSION" == "22.04"* ]]; then
|
||||
cmake3="cmake=3.22*"
|
||||
maybe_libiomp_dev=""
|
||||
else
|
||||
cmake3="cmake=3.5*"
|
||||
maybe_libiomp_dev="libiomp-dev"
|
||||
fi
|
||||
|
||||
if [[ "$CLANG_VERSION" == 12 ]]; then
|
||||
maybe_libomp_dev="libomp-12-dev"
|
||||
elif [[ "$CLANG_VERSION" == 10 ]]; then
|
||||
maybe_libomp_dev="libomp-10-dev"
|
||||
else
|
||||
maybe_libomp_dev=""
|
||||
fi
|
||||
|
||||
# TODO: Remove this once nvidia package repos are back online
|
||||
# Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968
|
||||
# shellcheck disable=SC2046
|
||||
sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list")
|
||||
|
||||
# Install common dependencies
|
||||
apt-get update
|
||||
# TODO: Some of these may not be necessary
|
||||
ccache_deps="asciidoc docbook-xml docbook-xsl xsltproc"
|
||||
deploy_deps="libffi-dev libbz2-dev libreadline-dev libncurses5-dev libncursesw5-dev libgdbm-dev libsqlite3-dev uuid-dev tk-dev"
|
||||
numpy_deps="gfortran"
|
||||
apt-get install -y --no-install-recommends \
|
||||
$ccache_deps \
|
||||
$numpy_deps \
|
||||
${deploy_deps} \
|
||||
${cmake3} \
|
||||
apt-transport-https \
|
||||
autoconf \
|
||||
@ -62,32 +44,15 @@ install_ubuntu() {
|
||||
libjpeg-dev \
|
||||
libasound2-dev \
|
||||
libsndfile-dev \
|
||||
${maybe_libomp_dev} \
|
||||
software-properties-common \
|
||||
wget \
|
||||
sudo \
|
||||
vim \
|
||||
jq \
|
||||
libtool
|
||||
wget \
|
||||
vim
|
||||
|
||||
# Should resolve issues related to various apt package repository cert issues
|
||||
# see: https://github.com/pytorch/pytorch/issues/65931
|
||||
apt-get install -y libgnutls30
|
||||
|
||||
# cuda-toolkit does not work with gcc-11.2.0 which is default in Ubunutu 22.04
|
||||
# see: https://github.com/NVlabs/instant-ngp/issues/119
|
||||
if [[ "$UBUNTU_VERSION" == "22.04"* ]]; then
|
||||
apt-get install -y g++-10
|
||||
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 30
|
||||
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 30
|
||||
update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-10 30
|
||||
|
||||
# https://www.spinics.net/lists/libreoffice/msg07549.html
|
||||
sudo rm -rf /usr/lib/gcc/x86_64-linux-gnu/11
|
||||
wget https://github.com/gcc-mirror/gcc/commit/2b2d97fc545635a0f6aa9c9ee3b017394bc494bf.patch -O noexecpt.patch
|
||||
sudo patch /usr/include/c++/10/bits/range_access.h noexecpt.patch
|
||||
fi
|
||||
|
||||
# Cleanup package manager
|
||||
apt-get autoclean && apt-get clean
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
@ -5,9 +5,7 @@ set -ex
|
||||
install_ubuntu() {
|
||||
echo "Preparing to build sccache from source"
|
||||
apt-get update
|
||||
# libssl-dev will not work as it is upgraded to libssl3 in Ubuntu-22.04.
|
||||
# Instead use lib and headers from OpenSSL1.1 installed in `install_openssl.sh``
|
||||
apt-get install -y cargo
|
||||
apt-get install -y cargo pkg-config libssl-dev
|
||||
echo "Checking out sccache repo"
|
||||
git clone https://github.com/pytorch/sccache
|
||||
cd sccache
|
||||
@ -48,9 +46,7 @@ fi
|
||||
chmod a+x /opt/cache/bin/sccache
|
||||
|
||||
function write_sccache_stub() {
|
||||
# Unset LD_PRELOAD for ps because of asan + ps issues
|
||||
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90589
|
||||
printf "#!/bin/sh\nif [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then\n exec sccache $(which $1) \"\$@\"\nelse\n exec $(which $1) \"\$@\"\nfi" > "/opt/cache/bin/$1"
|
||||
printf "#!/bin/sh\nif [ \$(ps -p \$PPID -o comm=) != sccache ]; then\n exec sccache $(which $1) \"\$@\"\nelse\n exec $(which $1) \"\$@\"\nfi" > "/opt/cache/bin/$1"
|
||||
chmod a+x "/opt/cache/bin/$1"
|
||||
}
|
||||
|
||||
|
||||
@ -13,9 +13,6 @@ if [ -n "$CLANG_VERSION" ]; then
|
||||
sudo apt-get install -y --no-install-recommends gpg-agent
|
||||
wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
|
||||
apt-add-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-${CLANG_VERSION} main"
|
||||
elif [[ $UBUNTU_VERSION == 22.04 ]]; then
|
||||
# work around ubuntu apt-get conflicts
|
||||
sudo apt-get -y -f install
|
||||
fi
|
||||
|
||||
sudo apt-get update
|
||||
|
||||
@ -13,7 +13,12 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
CONDA_FILE="Miniconda2-latest-Linux-x86_64.sh"
|
||||
;;
|
||||
3)
|
||||
CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
|
||||
if [ "$ANACONDA_PYTHON_VERSION" = "3.6" ]; then
|
||||
# Latest release of Conda that still supports python-3.6
|
||||
CONDA_FILE="Miniconda3-py37_4.10.3-Linux-x86_64.sh"
|
||||
else
|
||||
CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION"
|
||||
@ -21,7 +26,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
;;
|
||||
esac
|
||||
|
||||
mkdir -p /opt/conda
|
||||
mkdir /opt/conda
|
||||
chown jenkins:jenkins /opt/conda
|
||||
|
||||
# Work around bug where devtoolset replaces sudo and breaks it.
|
||||
@ -55,10 +60,10 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
# Ensure we run conda in a directory that jenkins has write access to
|
||||
pushd /opt/conda
|
||||
|
||||
# Prevent conda from updating to 4.14.0, which causes docker build failures
|
||||
# See https://hud.pytorch.org/pytorch/pytorch/commit/754d7f05b6841e555cea5a4b2c505dd9e0baec1d
|
||||
# Uncomment the below when resolved to track the latest conda update
|
||||
# as_jenkins conda update -y -n base conda
|
||||
# Track latest conda update
|
||||
if [ "$ANACONDA_PYTHON_VERSION" != "3.6" ]; then
|
||||
as_jenkins conda update -y -n base conda
|
||||
fi
|
||||
|
||||
# Install correct Python version
|
||||
as_jenkins conda install -y python="$ANACONDA_PYTHON_VERSION"
|
||||
@ -70,26 +75,22 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
as_jenkins conda install -q -y python="$ANACONDA_PYTHON_VERSION" $*
|
||||
}
|
||||
|
||||
pip_install() {
|
||||
as_jenkins pip install --progress-bar off $*
|
||||
}
|
||||
|
||||
# Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
|
||||
# DO NOT install cmake here as it would install a version newer than 3.13, but
|
||||
# we want to pin to version 3.13.
|
||||
CONDA_COMMON_DEPS="astunparse pyyaml mkl=2022.0.1 mkl-include=2022.0.1 setuptools cffi future six"
|
||||
if [ "$ANACONDA_PYTHON_VERSION" = "3.10" ]; then
|
||||
# DO NOT install cmake here as it would install a version newer than 3.10, but
|
||||
# we want to pin to version 3.10.
|
||||
SCIPY_VERSION=1.1.0
|
||||
if [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
|
||||
# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
|
||||
conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS} llvmdev=8.0.0
|
||||
elif [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
|
||||
# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
|
||||
conda_install numpy=1.19.2 ${CONDA_COMMON_DEPS} llvmdev=8.0.0
|
||||
conda_install numpy=1.19.2 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0 -c conda-forge
|
||||
SCIPY_VERSION=1.6.0
|
||||
elif [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then
|
||||
# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
|
||||
conda_install numpy=1.18.5 ${CONDA_COMMON_DEPS} llvmdev=8.0.0
|
||||
conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0
|
||||
elif [ "$ANACONDA_PYTHON_VERSION" = "3.7" ]; then
|
||||
# DO NOT install dataclasses if installing python-3.7, since its part of python-3.7 core packages
|
||||
conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six typing_extensions
|
||||
else
|
||||
# Install `typing_extensions` for 3.7
|
||||
conda_install numpy=1.18.5 ${CONDA_COMMON_DEPS} typing_extensions
|
||||
conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six dataclasses typing_extensions
|
||||
fi
|
||||
|
||||
# Magma package names are concatenation of CUDA major and minor ignoring revision
|
||||
@ -102,14 +103,34 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
conda_install nnpack -c killeent
|
||||
|
||||
# Install some other packages, including those needed for Python test reporting
|
||||
pip_install -r /opt/conda/requirements-ci.txt
|
||||
# TODO: Why is scipy pinned
|
||||
# Pin MyPy version because new errors are likely to appear with each release
|
||||
# Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
|
||||
as_jenkins pip install --progress-bar off pytest \
|
||||
scipy==$SCIPY_VERSION \
|
||||
scikit-image \
|
||||
psutil \
|
||||
unittest-xml-reporting \
|
||||
boto3==1.16.34 \
|
||||
hypothesis==4.53.2 \
|
||||
expecttest==0.1.3 \
|
||||
mypy==0.812 \
|
||||
tb-nightly
|
||||
|
||||
# Install numba only on python-3.8 or below
|
||||
# For numba issue see https://github.com/pytorch/pytorch/issues/51511
|
||||
if [[ $(python -c "import sys; print(int(sys.version_info < (3, 9)))") == "1" ]]; then
|
||||
as_jenkins pip install --progress-bar off numba==0.54.1 "librosa>=0.6.2,<0.9.0"
|
||||
else
|
||||
as_jenkins pip install --progress-bar off numba==0.49.0 "librosa>=0.6.2,<0.9.0"
|
||||
fi
|
||||
|
||||
# Update scikit-learn to a python-3.8 compatible version
|
||||
if [[ $(python -c "import sys; print(int(sys.version_info >= (3, 8)))") == "1" ]]; then
|
||||
pip_install -U scikit-learn
|
||||
as_jenkins pip install --progress-bar off -U scikit-learn
|
||||
else
|
||||
# Pinned scikit-learn due to https://github.com/scikit-learn/scikit-learn/issues/14485 (affects gcc 5.5 only)
|
||||
pip_install scikit-learn==0.20.3
|
||||
as_jenkins pip install --progress-bar off scikit-learn==0.20.3
|
||||
fi
|
||||
|
||||
popd
|
||||
|
||||
@ -1,24 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [[ ${CUDNN_VERSION} == 8 ]]; then
|
||||
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
|
||||
mkdir tmp_cudnn && cd tmp_cudnn
|
||||
CUDNN_NAME="cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive"
|
||||
if [[ ${CUDA_VERSION:0:4} == "11.7" ]]; then
|
||||
CUDNN_NAME="cudnn-linux-x86_64-8.5.0.96_cuda11-archive"
|
||||
curl -OLs https://ossci-linux.s3.amazonaws.com/${CUDNN_NAME}.tar.xz
|
||||
else
|
||||
curl -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/${CUDNN_NAME}.tar.xz
|
||||
fi
|
||||
|
||||
tar xf ${CUDNN_NAME}.tar.xz
|
||||
cp -a ${CUDNN_NAME}/include/* /usr/include/
|
||||
cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/
|
||||
cp -a ${CUDNN_NAME}/include/* /usr/include/x86_64-linux-gnu/
|
||||
|
||||
cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/
|
||||
cp -a ${CUDNN_NAME}/lib/* /usr/lib/x86_64-linux-gnu/
|
||||
cd ..
|
||||
rm -rf tmp_cudnn
|
||||
ldconfig
|
||||
fi
|
||||
@ -3,9 +3,6 @@
|
||||
set -ex
|
||||
|
||||
if [ -n "$KATEX" ]; then
|
||||
apt-get update
|
||||
# Ignore error if gpg-agent doesn't exist (for Ubuntu 16.04)
|
||||
apt-get install -y gpg-agent || :
|
||||
|
||||
curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
|
||||
sudo apt-get install -y nodejs
|
||||
@ -17,8 +14,6 @@ if [ -n "$KATEX" ]; then
|
||||
apt-get install -y --no-install-recommends yarn
|
||||
yarn global add katex --prefix /usr/local
|
||||
|
||||
sudo apt-get -y install doxygen
|
||||
|
||||
apt-get autoclean && apt-get clean
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
@ -10,7 +10,5 @@ cd "${OPENSSL}"
|
||||
./config --prefix=/opt/openssl -d '-Wl,--enable-new-dtags,-rpath,$(LIBRPATH)'
|
||||
# NOTE: openssl install errors out when built with the -j option
|
||||
make -j6; make install_sw
|
||||
# Link the ssl libraries to the /usr/lib folder.
|
||||
sudo ln -s /opt/openssl/lib/lib* /usr/lib
|
||||
cd ..
|
||||
rm -rf "${OPENSSL}"
|
||||
|
||||
@ -2,12 +2,40 @@
|
||||
|
||||
set -ex
|
||||
|
||||
install_magma() {
|
||||
# "install" hipMAGMA into /opt/rocm/magma by copying after build
|
||||
git clone https://bitbucket.org/icl/magma.git
|
||||
pushd magma
|
||||
# fix for magma_queue memory leak issue
|
||||
git checkout c62d700d880c7283b33fb1d615d62fc9c7f7ca21
|
||||
cp make.inc-examples/make.inc.hip-gcc-mkl make.inc
|
||||
echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc
|
||||
echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc
|
||||
echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc
|
||||
export PATH="${PATH}:/opt/rocm/bin"
|
||||
if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then
|
||||
amdgpu_targets=`echo $PYTORCH_ROCM_ARCH | sed 's/;/ /g'`
|
||||
else
|
||||
amdgpu_targets=`rocm_agent_enumerator | grep -v gfx000 | sort -u | xargs`
|
||||
fi
|
||||
for arch in $amdgpu_targets; do
|
||||
echo "DEVCCFLAGS += --amdgpu-target=$arch" >> make.inc
|
||||
done
|
||||
# hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition
|
||||
sed -i 's/^FOPENMP/#FOPENMP/g' make.inc
|
||||
make -f make.gen.hipMAGMA -j $(nproc)
|
||||
LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda
|
||||
make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda
|
||||
popd
|
||||
mv magma /opt/rocm
|
||||
}
|
||||
|
||||
ver() {
|
||||
printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
|
||||
}
|
||||
|
||||
# Map ROCm version to AMDGPU version
|
||||
declare -A AMDGPU_VERSIONS=( ["5.0"]="21.50" ["5.1.1"]="22.10.1" ["5.2"]="22.20" )
|
||||
declare -A AMDGPU_VERSIONS=( ["4.5.2"]="21.40.2" )
|
||||
|
||||
install_ubuntu() {
|
||||
apt-get update
|
||||
@ -61,6 +89,8 @@ install_ubuntu() {
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENKERNELS}
|
||||
fi
|
||||
|
||||
install_magma
|
||||
|
||||
# Cleanup
|
||||
apt-get autoclean && apt-get clean
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
@ -105,6 +135,8 @@ install_centos() {
|
||||
rocprofiler-dev \
|
||||
roctracer-dev
|
||||
|
||||
install_magma
|
||||
|
||||
# Cleanup
|
||||
yum clean all
|
||||
rm -rf /var/cache/yum
|
||||
|
||||
@ -1,29 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
# "install" hipMAGMA into /opt/rocm/magma by copying after build
|
||||
git clone https://bitbucket.org/icl/magma.git
|
||||
pushd magma
|
||||
# Fixes memory leaks of magma found while executing linalg UTs
|
||||
git checkout 5959b8783e45f1809812ed96ae762f38ee701972
|
||||
cp make.inc-examples/make.inc.hip-gcc-mkl make.inc
|
||||
echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc
|
||||
echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc
|
||||
echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc
|
||||
export PATH="${PATH}:/opt/rocm/bin"
|
||||
if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then
|
||||
amdgpu_targets=`echo $PYTORCH_ROCM_ARCH | sed 's/;/ /g'`
|
||||
else
|
||||
amdgpu_targets=`rocm_agent_enumerator | grep -v gfx000 | sort -u | xargs`
|
||||
fi
|
||||
for arch in $amdgpu_targets; do
|
||||
echo "DEVCCFLAGS += --amdgpu-target=$arch" >> make.inc
|
||||
done
|
||||
# hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition
|
||||
sed -i 's/^FOPENMP/#FOPENMP/g' make.inc
|
||||
make -f make.gen.hipMAGMA -j $(nproc)
|
||||
LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda
|
||||
make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda
|
||||
popd
|
||||
mv magma /opt/rocm
|
||||
7
.circleci/docker/common/install_tensorrt.sh
Normal file
7
.circleci/docker/common/install_tensorrt.sh
Normal file
@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ -n "$TENSORRT_VERSION" ]; then
|
||||
python3 -m pip install --upgrade setuptools pip
|
||||
python3 -m pip install nvidia-pyindex
|
||||
python3 -m pip install nvidia-tensorrt==${TENSORRT_VERSION} --extra-index-url https://pypi.ngc.nvidia.com
|
||||
fi
|
||||
@ -1,48 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
if [[ -d "/usr/local/cuda/" ]]; then
|
||||
with_cuda=/usr/local/cuda/
|
||||
else
|
||||
with_cuda=no
|
||||
fi
|
||||
|
||||
function install_ucx() {
|
||||
set -ex
|
||||
git clone --recursive https://github.com/openucx/ucx.git
|
||||
pushd ucx
|
||||
git checkout ${UCX_COMMIT}
|
||||
git submodule update --init --recursive
|
||||
|
||||
./autogen.sh
|
||||
./configure --prefix=$UCX_HOME \
|
||||
--enable-mt \
|
||||
--with-cuda=$with_cuda \
|
||||
--enable-profiling \
|
||||
--enable-stats
|
||||
time make -j
|
||||
sudo make install
|
||||
|
||||
popd
|
||||
rm -rf ucx
|
||||
}
|
||||
|
||||
function install_ucc() {
|
||||
set -ex
|
||||
git clone --recursive https://github.com/openucx/ucc.git
|
||||
pushd ucc
|
||||
git checkout ${UCC_COMMIT}
|
||||
git submodule update --init --recursive
|
||||
|
||||
./autogen.sh
|
||||
./configure --prefix=$UCC_HOME --with-ucx=$UCX_HOME --with-cuda=$with_cuda
|
||||
time make -j
|
||||
sudo make install
|
||||
|
||||
popd
|
||||
rm -rf ucc
|
||||
}
|
||||
|
||||
install_ucx
|
||||
install_ucc
|
||||
@ -3,11 +3,8 @@
|
||||
set -ex
|
||||
|
||||
# Mirror jenkins user in container
|
||||
# jenkins user as ec2-user should have the same user-id
|
||||
echo "jenkins:x:1000:1000::/var/lib/jenkins:" >> /etc/passwd
|
||||
echo "jenkins:x:1000:" >> /etc/group
|
||||
# Needed on focal or newer
|
||||
echo "jenkins:*:19110:0:99999:7:::" >>/etc/shadow
|
||||
echo "jenkins:x:1014:1014::/var/lib/jenkins:" >> /etc/passwd
|
||||
echo "jenkins:x:1014:" >> /etc/group
|
||||
|
||||
# Create $HOME
|
||||
mkdir -p /var/lib/jenkins
|
||||
@ -21,6 +18,3 @@ chown jenkins:jenkins /usr/local
|
||||
# Allow sudo
|
||||
# TODO: Maybe we shouldn't
|
||||
echo 'jenkins ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/jenkins
|
||||
|
||||
# Test that sudo works
|
||||
sudo -u jenkins sudo -v
|
||||
|
||||
@ -1,244 +0,0 @@
|
||||
# Python dependencies required for unit tests
|
||||
|
||||
#awscli==1.6 #this breaks some platforms
|
||||
#Description: AWS command line interface
|
||||
#Pinned versions: 1.6
|
||||
#test that import:
|
||||
|
||||
boto3==1.19.12
|
||||
#Description: AWS SDK for python
|
||||
#Pinned versions: 1.19.12, 1.16.34
|
||||
#test that import:
|
||||
|
||||
click
|
||||
#Description: Command Line Interface Creation Kit
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
coremltools==5.0b5
|
||||
#Description: Apple framework for ML integration
|
||||
#Pinned versions: 5.0b5
|
||||
#test that import:
|
||||
|
||||
#dataclasses #this breaks some platforms
|
||||
#Description: Provides decorators for auto adding special methods to user classes
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
expecttest==0.1.3
|
||||
#Description: method for writing tests where test framework auto populates
|
||||
# the expected output based on previous runs
|
||||
#Pinned versions: 0.1.3
|
||||
#test that import:
|
||||
|
||||
flatbuffers==2.0
|
||||
#Description: cross platform serialization library
|
||||
#Pinned versions: 2.0
|
||||
#test that import:
|
||||
|
||||
#future #this breaks linux-bionic-rocm4.5-py3.7
|
||||
#Description: compatibility layer between python 2 and python 3
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
hypothesis==5.35.1
|
||||
# Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
|
||||
#Description: advanced library for generating parametrized tests
|
||||
#Pinned versions: 3.44.6, 4.53.2
|
||||
#test that import: test_xnnpack_integration.py, test_pruning_op.py, test_nn.py
|
||||
|
||||
junitparser==2.1.1
|
||||
#Description: unitparser handles JUnit/xUnit Result XML files
|
||||
#Pinned versions: 2.1.1
|
||||
#test that import:
|
||||
|
||||
librosa>=0.6.2
|
||||
#Description: A python package for music and audio analysis
|
||||
#Pinned versions: >=0.6.2
|
||||
#test that import: test_spectral_ops.py
|
||||
|
||||
#mkl #this breaks linux-bionic-rocm4.5-py3.7
|
||||
#Description: Intel oneAPI Math Kernel Library
|
||||
#Pinned versions:
|
||||
#test that import: test_profiler.py, test_public_bindings.py, test_testing.py,
|
||||
#test_nn.py, test_mkldnn.py, test_jit.py, test_fx_experimental.py,
|
||||
#test_autograd.py
|
||||
|
||||
#mkl-devel
|
||||
# see mkl
|
||||
|
||||
#mock # breaks ci/circleci: docker-pytorch-linux-xenial-py3-clang5-android-ndk-r19c
|
||||
#Description: A testing library that allows you to replace parts of your
|
||||
#system under test with mock objects
|
||||
#Pinned versions:
|
||||
#test that import: test_module_init.py, test_modules.py, test_nn.py,
|
||||
#test_testing.py
|
||||
|
||||
#MonkeyType # breaks pytorch-xla-linux-bionic-py3.7-clang8
|
||||
#Description: collects runtime types of function arguments and return
|
||||
#values, and can automatically generate stub files
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
mypy==0.960
|
||||
# Pin MyPy version because new errors are likely to appear with each release
|
||||
#Description: linter
|
||||
#Pinned versions: 0.960
|
||||
#test that import: test_typing.py, test_type_hints.py
|
||||
|
||||
networkx==2.6.3
|
||||
#Description: creation, manipulation, and study of
|
||||
#the structure, dynamics, and functions of complex networks
|
||||
#Pinned versions: 2.6.3 (latest version that works with Python 3.7+)
|
||||
#test that import: functorch
|
||||
|
||||
#ninja
|
||||
#Description: build system. Note that it install from
|
||||
#here breaks things so it is commented out
|
||||
#Pinned versions: 1.10.0.post1
|
||||
#test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py
|
||||
|
||||
numba==0.49.0 ; python_version < "3.9"
|
||||
numba==0.54.1 ; python_version == "3.9"
|
||||
numba==0.55.2 ; python_version == "3.10"
|
||||
#Description: Just-In-Time Compiler for Numerical Functions
|
||||
#Pinned versions: 0.54.1, 0.49.0, <=0.49.1
|
||||
#test that import: test_numba_integration.py
|
||||
#For numba issue see https://github.com/pytorch/pytorch/issues/51511
|
||||
|
||||
#numpy
|
||||
#Description: Provides N-dimensional arrays and linear algebra
|
||||
#Pinned versions: 1.20
|
||||
#test that import: test_view_ops.py, test_unary_ufuncs.py, test_type_promotion.py,
|
||||
#test_type_info.py, test_torch.py, test_tensorexpr_pybind.py, test_tensorexpr.py,
|
||||
#test_tensorboard.py, test_tensor_creation_ops.py, test_static_runtime.py,
|
||||
#test_spectral_ops.py, test_sort_and_select.py, test_shape_ops.py,
|
||||
#test_segment_reductions.py, test_reductions.py, test_pruning_op.py,
|
||||
#test_overrides.py, test_numpy_interop.py, test_numba_integration.py
|
||||
#test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py,
|
||||
#test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py,
|
||||
#test_binary_ufuncs.py
|
||||
|
||||
#onnxruntime
|
||||
#Description: scoring engine for Open Neural Network Exchange (ONNX) models
|
||||
#Pinned versions: 1.9.0
|
||||
#test that import:
|
||||
|
||||
opt-einsum==3.3
|
||||
#Description: Python library to optimize tensor contraction order, used in einsum
|
||||
#Pinned versions: 3.3
|
||||
#test that import: test_linalg.py
|
||||
|
||||
#pillow
|
||||
#Description: Python Imaging Library fork
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
protobuf==3.20.2
|
||||
#Description: Google’s data interchange format
|
||||
#Pinned versions: 3.20.1
|
||||
#test that import: test_tensorboard.py
|
||||
|
||||
psutil
|
||||
#Description: information on running processes and system utilization
|
||||
#Pinned versions:
|
||||
#test that import: test_profiler.py, test_openmp.py, test_dataloader.py
|
||||
|
||||
pytest
|
||||
#Description: testing framework
|
||||
#Pinned versions:
|
||||
#test that import: test_typing.py, test_cpp_extensions_aot.py, run_test.py
|
||||
|
||||
pytest-xdist
|
||||
#Description: plugin for running pytest in parallel
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
pytest-shard
|
||||
#Description: plugin spliting up tests in pytest
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
pytest-rerunfailures
|
||||
#Description: plugin for rerunning tests in pytest
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#pytest-benchmark
|
||||
#Description: fixture for benchmarking code
|
||||
#Pinned versions: 3.2.3
|
||||
#test that import:
|
||||
|
||||
#pytest-sugar
|
||||
#Description: shows failures and errors instantly
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
xdoctest==1.0.2
|
||||
#Description: runs doctests in pytest
|
||||
#Pinned versions: 1.0.2
|
||||
#test that import:
|
||||
|
||||
pygments==2.12.0
|
||||
#Description: support doctest highlighting
|
||||
#Pinned versions: 2.12.0
|
||||
#test that import: the doctests
|
||||
|
||||
#PyYAML
|
||||
#Description: data serialization format
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#requests
|
||||
#Description: HTTP library
|
||||
#Pinned versions:
|
||||
#test that import: test_type_promotion.py
|
||||
|
||||
#rich
|
||||
#Description: rich text and beautiful formatting in the terminal
|
||||
#Pinned versions: 10.9.0
|
||||
#test that import:
|
||||
|
||||
scikit-image
|
||||
#Description: image processing routines
|
||||
#Pinned versions:
|
||||
#test that import: test_nn.py
|
||||
|
||||
#scikit-learn
|
||||
#Description: machine learning package
|
||||
#Pinned versions: 0.20.3
|
||||
#test that import:
|
||||
|
||||
scipy==1.6.3 ; python_version < "3.10"
|
||||
scipy==1.8.1 ; python_version == "3.10"
|
||||
# Pin SciPy because of failing distribution tests (see #60347)
|
||||
#Description: scientific python
|
||||
#Pinned versions: 1.6.3
|
||||
#test that import: test_unary_ufuncs.py, test_torch.py,test_tensor_creation_ops.py
|
||||
#test_spectral_ops.py, test_sparse_csr.py, test_reductions.py,test_nn.py
|
||||
#test_linalg.py, test_binary_ufuncs.py
|
||||
|
||||
#tabulate
|
||||
#Description: Pretty-print tabular data
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
tb-nightly
|
||||
#Description: TensorBoard
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#typing-extensions
|
||||
#Description: type hints for python
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#virtualenv
|
||||
#Description: virtual environment for python
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
unittest-xml-reporting<=3.2.0,>=2.0.0
|
||||
#Description: saves unit test results to xml
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
@ -1,106 +1,95 @@
|
||||
ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
ARG IMAGE_NAME
|
||||
ARG CUDNN_VERSION
|
||||
|
||||
FROM ${IMAGE_NAME}
|
||||
FROM nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
ARG CUDNN_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
ARG EC2
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
ADD ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
ADD ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install katex
|
||||
ARG KATEX
|
||||
COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
|
||||
RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
|
||||
ADD ./common/install_katex.sh install_katex.sh
|
||||
RUN bash ./install_katex.sh && rm install_katex.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
ADD ./common/install_conda.sh install_conda.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh
|
||||
RUN rm /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
COPY ./common/install_gcc.sh install_gcc.sh
|
||||
ADD ./common/install_gcc.sh install_gcc.sh
|
||||
RUN bash ./install_gcc.sh && rm install_gcc.sh
|
||||
|
||||
# Install clang
|
||||
ARG CLANG_VERSION
|
||||
COPY ./common/install_clang.sh install_clang.sh
|
||||
ADD ./common/install_clang.sh install_clang.sh
|
||||
RUN bash ./install_clang.sh && rm install_clang.sh
|
||||
|
||||
# (optional) Install protobuf for ONNX
|
||||
ARG PROTOBUF
|
||||
COPY ./common/install_protobuf.sh install_protobuf.sh
|
||||
ADD ./common/install_protobuf.sh install_protobuf.sh
|
||||
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
|
||||
RUN rm install_protobuf.sh
|
||||
ENV INSTALLED_PROTOBUF ${PROTOBUF}
|
||||
|
||||
# (optional) Install database packages like LMDB and LevelDB
|
||||
ARG DB
|
||||
COPY ./common/install_db.sh install_db.sh
|
||||
ADD ./common/install_db.sh install_db.sh
|
||||
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
|
||||
RUN rm install_db.sh
|
||||
ENV INSTALLED_DB ${DB}
|
||||
|
||||
# (optional) Install vision packages like OpenCV and ffmpeg
|
||||
ARG VISION
|
||||
COPY ./common/install_vision.sh install_vision.sh
|
||||
ADD ./common/install_vision.sh install_vision.sh
|
||||
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
|
||||
RUN rm install_vision.sh
|
||||
ENV INSTALLED_VISION ${VISION}
|
||||
|
||||
# (optional) Install UCC
|
||||
ARG UCX_COMMIT
|
||||
ARG UCC_COMMIT
|
||||
ENV UCX_COMMIT $UCX_COMMIT
|
||||
ENV UCC_COMMIT $UCC_COMMIT
|
||||
ENV UCX_HOME /usr
|
||||
ENV UCC_HOME /usr
|
||||
ADD ./common/install_ucc.sh install_ucc.sh
|
||||
RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi
|
||||
RUN rm install_ucc.sh
|
||||
|
||||
COPY ./common/install_openssl.sh install_openssl.sh
|
||||
ADD ./common/install_openssl.sh install_openssl.sh
|
||||
ENV OPENSSL_ROOT_DIR /opt/openssl
|
||||
RUN bash ./install_openssl.sh
|
||||
ENV OPENSSL_DIR /opt/openssl
|
||||
|
||||
# (optional) Install TensorRT
|
||||
ARG TENSORRT_VERSION
|
||||
ADD ./common/install_tensorrt.sh install_tensorrt.sh
|
||||
RUN if [ -n "${TENSORRT_VERSION}" ]; then bash ./install_tensorrt.sh; fi
|
||||
RUN rm install_tensorrt.sh
|
||||
|
||||
# (optional) Install non-default CMake version
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./common/install_cmake.sh install_cmake.sh
|
||||
ADD ./common/install_cmake.sh install_cmake.sh
|
||||
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
|
||||
RUN rm install_cmake.sh
|
||||
|
||||
# Install ccache/sccache (do this last, so we get priority in PATH)
|
||||
COPY ./common/install_cache.sh install_cache.sh
|
||||
ADD ./common/install_cache.sh install_cache.sh
|
||||
ENV PATH /opt/cache/bin:$PATH
|
||||
# See https://github.com/pytorch/pytorch/issues/82174
|
||||
# TODO(sdym@fb.com):
|
||||
# check if this is needed after full off Xenial migration
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
|
||||
RUN bash ./install_cache.sh && rm install_cache.sh
|
||||
ENV CMAKE_CUDA_COMPILER_LAUNCHER=/opt/cache/bin/sccache
|
||||
|
||||
# Add jni.h for java host build
|
||||
COPY ./common/install_jni.sh install_jni.sh
|
||||
COPY ./java/jni.h jni.h
|
||||
ADD ./common/install_jni.sh install_jni.sh
|
||||
ADD ./java/jni.h jni.h
|
||||
RUN bash ./install_jni.sh && rm install_jni.sh
|
||||
|
||||
# Install Open MPI for CUDA
|
||||
COPY ./common/install_openmpi.sh install_openmpi.sh
|
||||
ADD ./common/install_openmpi.sh install_openmpi.sh
|
||||
RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi
|
||||
RUN rm install_openmpi.sh
|
||||
|
||||
@ -116,16 +105,5 @@ ENV CUDA_PATH /usr/local/cuda
|
||||
# Install LLVM dev version (Defined in the pytorch/builder github repository)
|
||||
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
|
||||
|
||||
# Install CUDNN
|
||||
ARG CUDNN_VERSION
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_cudnn.sh install_cudnn.sh
|
||||
RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi
|
||||
RUN rm install_cudnn.sh
|
||||
|
||||
# Delete /usr/local/cuda-11.X/cuda-11.X symlinks
|
||||
RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi
|
||||
RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi
|
||||
|
||||
USER jenkins
|
||||
CMD ["bash"]
|
||||
|
||||
@ -12,61 +12,56 @@ ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
ARG EC2
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
ADD ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Install clang
|
||||
ARG LLVMDEV
|
||||
ARG CLANG_VERSION
|
||||
COPY ./common/install_clang.sh install_clang.sh
|
||||
ADD ./common/install_clang.sh install_clang.sh
|
||||
RUN bash ./install_clang.sh && rm install_clang.sh
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
ADD ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
ADD ./common/install_conda.sh install_conda.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh
|
||||
RUN rm /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
COPY ./common/install_gcc.sh install_gcc.sh
|
||||
ADD ./common/install_gcc.sh install_gcc.sh
|
||||
RUN bash ./install_gcc.sh && rm install_gcc.sh
|
||||
|
||||
# (optional) Install protobuf for ONNX
|
||||
ARG PROTOBUF
|
||||
COPY ./common/install_protobuf.sh install_protobuf.sh
|
||||
ADD ./common/install_protobuf.sh install_protobuf.sh
|
||||
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
|
||||
RUN rm install_protobuf.sh
|
||||
ENV INSTALLED_PROTOBUF ${PROTOBUF}
|
||||
|
||||
# (optional) Install database packages like LMDB and LevelDB
|
||||
ARG DB
|
||||
COPY ./common/install_db.sh install_db.sh
|
||||
ADD ./common/install_db.sh install_db.sh
|
||||
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
|
||||
RUN rm install_db.sh
|
||||
ENV INSTALLED_DB ${DB}
|
||||
|
||||
# (optional) Install vision packages like OpenCV and ffmpeg
|
||||
ARG VISION
|
||||
COPY ./common/install_vision.sh install_vision.sh
|
||||
ADD ./common/install_vision.sh install_vision.sh
|
||||
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
|
||||
RUN rm install_vision.sh
|
||||
ENV INSTALLED_VISION ${VISION}
|
||||
|
||||
# Install rocm
|
||||
ARG ROCM_VERSION
|
||||
COPY ./common/install_rocm.sh install_rocm.sh
|
||||
ADD ./common/install_rocm.sh install_rocm.sh
|
||||
RUN bash ./install_rocm.sh
|
||||
RUN rm install_rocm.sh
|
||||
COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
|
||||
RUN bash ./install_rocm_magma.sh
|
||||
RUN rm install_rocm_magma.sh
|
||||
ENV PATH /opt/rocm/bin:$PATH
|
||||
ENV PATH /opt/rocm/hcc/bin:$PATH
|
||||
ENV PATH /opt/rocm/hip/bin:$PATH
|
||||
@ -78,18 +73,18 @@ ENV LC_ALL C.UTF-8
|
||||
|
||||
# (optional) Install non-default CMake version
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./common/install_cmake.sh install_cmake.sh
|
||||
ADD ./common/install_cmake.sh install_cmake.sh
|
||||
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
|
||||
RUN rm install_cmake.sh
|
||||
|
||||
# (optional) Install non-default Ninja version
|
||||
ARG NINJA_VERSION
|
||||
COPY ./common/install_ninja.sh install_ninja.sh
|
||||
ADD ./common/install_ninja.sh install_ninja.sh
|
||||
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
|
||||
RUN rm install_ninja.sh
|
||||
|
||||
# Install ccache/sccache (do this last, so we get priority in PATH)
|
||||
COPY ./common/install_cache.sh install_cache.sh
|
||||
ADD ./common/install_cache.sh install_cache.sh
|
||||
ENV PATH /opt/cache/bin:$PATH
|
||||
RUN bash ./install_cache.sh && rm install_cache.sh
|
||||
|
||||
|
||||
@ -6,86 +6,65 @@ ARG UBUNTU_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
ARG CLANG_VERSION
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
ARG EC2
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
ADD ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Install clang
|
||||
ARG LLVMDEV
|
||||
COPY ./common/install_clang.sh install_clang.sh
|
||||
ARG CLANG_VERSION
|
||||
ADD ./common/install_clang.sh install_clang.sh
|
||||
RUN bash ./install_clang.sh && rm install_clang.sh
|
||||
|
||||
# (optional) Install thrift.
|
||||
ARG THRIFT
|
||||
COPY ./common/install_thrift.sh install_thrift.sh
|
||||
ADD ./common/install_thrift.sh install_thrift.sh
|
||||
RUN if [ -n "${THRIFT}" ]; then bash ./install_thrift.sh; fi
|
||||
RUN rm install_thrift.sh
|
||||
ENV INSTALLED_THRIFT ${THRIFT}
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
ADD ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install katex
|
||||
ARG KATEX
|
||||
COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
|
||||
RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
|
||||
ADD ./common/install_katex.sh install_katex.sh
|
||||
RUN bash ./install_katex.sh && rm install_katex.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
ADD ./common/install_conda.sh install_conda.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh
|
||||
RUN rm /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
COPY ./common/install_gcc.sh install_gcc.sh
|
||||
ADD ./common/install_gcc.sh install_gcc.sh
|
||||
RUN bash ./install_gcc.sh && rm install_gcc.sh
|
||||
|
||||
# Install lcov for C++ code coverage
|
||||
COPY ./common/install_lcov.sh install_lcov.sh
|
||||
ADD ./common/install_lcov.sh install_lcov.sh
|
||||
RUN bash ./install_lcov.sh && rm install_lcov.sh
|
||||
|
||||
# Install cuda and cudnn
|
||||
ARG CUDA_VERSION
|
||||
RUN wget -q https://raw.githubusercontent.com/pytorch/builder/main/common/install_cuda.sh -O install_cuda.sh
|
||||
RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh
|
||||
ENV DESIRED_CUDA ${CUDA_VERSION}
|
||||
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH
|
||||
|
||||
# (optional) Install UCC
|
||||
ARG UCX_COMMIT
|
||||
ARG UCC_COMMIT
|
||||
ENV UCX_COMMIT $UCX_COMMIT
|
||||
ENV UCC_COMMIT $UCC_COMMIT
|
||||
ENV UCX_HOME /usr
|
||||
ENV UCC_HOME /usr
|
||||
ADD ./common/install_ucc.sh install_ucc.sh
|
||||
RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi
|
||||
RUN rm install_ucc.sh
|
||||
|
||||
# (optional) Install protobuf for ONNX
|
||||
ARG PROTOBUF
|
||||
COPY ./common/install_protobuf.sh install_protobuf.sh
|
||||
ADD ./common/install_protobuf.sh install_protobuf.sh
|
||||
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
|
||||
RUN rm install_protobuf.sh
|
||||
ENV INSTALLED_PROTOBUF ${PROTOBUF}
|
||||
|
||||
# (optional) Install database packages like LMDB and LevelDB
|
||||
ARG DB
|
||||
COPY ./common/install_db.sh install_db.sh
|
||||
ADD ./common/install_db.sh install_db.sh
|
||||
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
|
||||
RUN rm install_db.sh
|
||||
ENV INSTALLED_DB ${DB}
|
||||
|
||||
# (optional) Install vision packages like OpenCV and ffmpeg
|
||||
ARG VISION
|
||||
COPY ./common/install_vision.sh install_vision.sh
|
||||
ADD ./common/install_vision.sh install_vision.sh
|
||||
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
|
||||
RUN rm install_vision.sh
|
||||
ENV INSTALLED_VISION ${VISION}
|
||||
@ -94,9 +73,9 @@ ENV INSTALLED_VISION ${VISION}
|
||||
ARG ANDROID
|
||||
ARG ANDROID_NDK
|
||||
ARG GRADLE_VERSION
|
||||
COPY ./common/install_android.sh install_android.sh
|
||||
COPY ./android/AndroidManifest.xml AndroidManifest.xml
|
||||
COPY ./android/build.gradle build.gradle
|
||||
ADD ./common/install_android.sh install_android.sh
|
||||
ADD ./android/AndroidManifest.xml AndroidManifest.xml
|
||||
ADD ./android/build.gradle build.gradle
|
||||
RUN if [ -n "${ANDROID}" ]; then bash ./install_android.sh; fi
|
||||
RUN rm install_android.sh
|
||||
RUN rm AndroidManifest.xml
|
||||
@ -105,53 +84,42 @@ ENV INSTALLED_ANDROID ${ANDROID}
|
||||
|
||||
# (optional) Install Vulkan SDK
|
||||
ARG VULKAN_SDK_VERSION
|
||||
COPY ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh
|
||||
ADD ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh
|
||||
RUN if [ -n "${VULKAN_SDK_VERSION}" ]; then bash ./install_vulkan_sdk.sh; fi
|
||||
RUN rm install_vulkan_sdk.sh
|
||||
|
||||
# (optional) Install swiftshader
|
||||
ARG SWIFTSHADER
|
||||
COPY ./common/install_swiftshader.sh install_swiftshader.sh
|
||||
ADD ./common/install_swiftshader.sh install_swiftshader.sh
|
||||
RUN if [ -n "${SWIFTSHADER}" ]; then bash ./install_swiftshader.sh; fi
|
||||
RUN rm install_swiftshader.sh
|
||||
|
||||
# (optional) Install non-default CMake version
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./common/install_cmake.sh install_cmake.sh
|
||||
ADD ./common/install_cmake.sh install_cmake.sh
|
||||
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
|
||||
RUN rm install_cmake.sh
|
||||
|
||||
# (optional) Install non-default Ninja version
|
||||
ARG NINJA_VERSION
|
||||
COPY ./common/install_ninja.sh install_ninja.sh
|
||||
ADD ./common/install_ninja.sh install_ninja.sh
|
||||
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
|
||||
RUN rm install_ninja.sh
|
||||
|
||||
COPY ./common/install_openssl.sh install_openssl.sh
|
||||
ADD ./common/install_openssl.sh install_openssl.sh
|
||||
RUN bash ./install_openssl.sh
|
||||
ENV OPENSSL_ROOT_DIR /opt/openssl
|
||||
ENV OPENSSL_DIR /opt/openssl
|
||||
RUN rm install_openssl.sh
|
||||
|
||||
# Install ccache/sccache (do this last, so we get priority in PATH)
|
||||
COPY ./common/install_cache.sh install_cache.sh
|
||||
ADD ./common/install_cache.sh install_cache.sh
|
||||
ENV PATH /opt/cache/bin:$PATH
|
||||
# See https://github.com/pytorch/pytorch/issues/82174
|
||||
# TODO(sdym@fb.com):
|
||||
# check if this is needed after full off Xenial migration
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
|
||||
RUN bash ./install_cache.sh && rm install_cache.sh
|
||||
|
||||
# Add jni.h for java host build
|
||||
COPY ./common/install_jni.sh install_jni.sh
|
||||
COPY ./java/jni.h jni.h
|
||||
ADD ./common/install_jni.sh install_jni.sh
|
||||
ADD ./java/jni.h jni.h
|
||||
RUN bash ./install_jni.sh && rm install_jni.sh
|
||||
|
||||
# Install Open MPI for CUDA
|
||||
COPY ./common/install_openmpi.sh install_openmpi.sh
|
||||
RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi
|
||||
RUN rm install_openmpi.sh
|
||||
|
||||
# Include BUILD_ENVIRONMENT environment variable in image
|
||||
ARG BUILD_ENVIRONMENT
|
||||
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
|
||||
@ -159,10 +127,5 @@ ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
|
||||
# Install LLVM dev version (Defined in the pytorch/builder github repository)
|
||||
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
|
||||
|
||||
# AWS specific CUDA build guidance
|
||||
ENV TORCH_CUDA_ARCH_LIST Maxwell
|
||||
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
|
||||
ENV CUDA_PATH /usr/local/cuda
|
||||
|
||||
USER jenkins
|
||||
CMD ["bash"]
|
||||
|
||||
@ -10,13 +10,14 @@ import shutil
|
||||
import sys
|
||||
from collections import namedtuple
|
||||
|
||||
import cimodel.data.binary_build_definitions as binary_build_definitions
|
||||
import cimodel.data.simple.android_definitions
|
||||
import cimodel.data.simple.binary_smoketest
|
||||
import cimodel.data.simple.docker_definitions
|
||||
import cimodel.data.simple.mobile_definitions
|
||||
import cimodel.data.simple.nightly_android
|
||||
import cimodel.data.simple.nightly_ios
|
||||
import cimodel.data.simple.anaconda_prune_defintions
|
||||
import cimodel.data.simple.macos_definitions
|
||||
import cimodel.data.simple.upload_test_stats_definition
|
||||
import cimodel.data.simple.ios_definitions
|
||||
import cimodel.lib.miniutils as miniutils
|
||||
import cimodel.lib.miniyaml as miniyaml
|
||||
|
||||
@ -73,7 +74,6 @@ class Header(object):
|
||||
for line in filter(None, lines):
|
||||
output_filehandle.write(line + "\n")
|
||||
|
||||
|
||||
def _for_all_items(items, functor) -> None:
|
||||
if isinstance(items, list):
|
||||
for item in items:
|
||||
@ -82,13 +82,12 @@ def _for_all_items(items, functor) -> None:
|
||||
item_type, item = next(iter(items.items()))
|
||||
functor(item_type, item)
|
||||
|
||||
|
||||
def filter_master_only_jobs(items):
|
||||
def _is_main_or_master_item(item):
|
||||
def _is_master_item(item):
|
||||
filters = item.get('filters', None)
|
||||
branches = filters.get('branches', None) if filters is not None else None
|
||||
branches_only = branches.get('only', None) if branches is not None else None
|
||||
return ('main' in branches_only or 'master' in branches_only) if branches_only is not None else False
|
||||
return 'master' in branches_only if branches_only is not None else False
|
||||
|
||||
master_deps = set()
|
||||
|
||||
@ -97,7 +96,7 @@ def filter_master_only_jobs(items):
|
||||
item_name = item.get("name", None)
|
||||
if not isinstance(requires, list):
|
||||
return
|
||||
if _is_main_or_master_item(item) or item_name in master_deps:
|
||||
if _is_master_item(item) or item_name in master_deps:
|
||||
master_deps.update([n.strip('"') for n in requires])
|
||||
|
||||
def _do_filtering(items):
|
||||
@ -108,7 +107,7 @@ def filter_master_only_jobs(items):
|
||||
item_type, item = next(iter(items.items()))
|
||||
item_name = item.get("name", None)
|
||||
item_name = item_name.strip('"') if item_name is not None else None
|
||||
if not _is_main_or_master_item(item) and item_name not in master_deps:
|
||||
if not _is_master_item(item) and item_name not in master_deps:
|
||||
return None
|
||||
if 'filters' in item:
|
||||
item = item.copy()
|
||||
@ -116,12 +115,11 @@ def filter_master_only_jobs(items):
|
||||
return {item_type: item}
|
||||
|
||||
# Scan of dependencies twice to pick up nested required jobs
|
||||
# I.e. jobs depending on jobs that main-only job depend on
|
||||
# I.e. jobs depending on jobs that master-only job depend on
|
||||
_for_all_items(items, _save_requires_if_master)
|
||||
_for_all_items(items, _save_requires_if_master)
|
||||
return _do_filtering(items)
|
||||
|
||||
|
||||
def generate_required_docker_images(items):
|
||||
required_docker_images = set()
|
||||
|
||||
@ -137,15 +135,16 @@ def generate_required_docker_images(items):
|
||||
_for_all_items(items, _requires_docker_image)
|
||||
return required_docker_images
|
||||
|
||||
|
||||
def gen_build_workflows_tree():
|
||||
build_workflows_functions = [
|
||||
cimodel.data.simple.android_definitions.get_workflow_jobs,
|
||||
cimodel.data.simple.mobile_definitions.get_workflow_jobs,
|
||||
cimodel.data.simple.binary_smoketest.get_workflow_jobs,
|
||||
cimodel.data.simple.nightly_ios.get_workflow_jobs,
|
||||
cimodel.data.simple.nightly_android.get_workflow_jobs,
|
||||
cimodel.data.simple.anaconda_prune_defintions.get_workflow_jobs,
|
||||
cimodel.data.simple.macos_definitions.get_new_workflow_jobs,
|
||||
cimodel.data.simple.upload_test_stats_definition.get_workflow_job,
|
||||
cimodel.data.simple.ios_definitions.get_workflow_jobs,
|
||||
binary_build_definitions.get_post_upload_jobs,
|
||||
binary_build_definitions.get_binary_smoke_test_jobs,
|
||||
]
|
||||
build_jobs = [f() for f in build_workflows_functions]
|
||||
build_jobs.extend(
|
||||
@ -156,20 +155,28 @@ def gen_build_workflows_tree():
|
||||
)
|
||||
master_build_jobs = filter_master_only_jobs(build_jobs)
|
||||
|
||||
rc = {
|
||||
binary_build_functions = [
|
||||
binary_build_definitions.get_binary_build_jobs,
|
||||
binary_build_definitions.get_nightly_tests,
|
||||
binary_build_definitions.get_nightly_uploads,
|
||||
]
|
||||
|
||||
return {
|
||||
"workflows": {
|
||||
"binary_builds": {
|
||||
"when": r"<< pipeline.parameters.run_binary_tests >>",
|
||||
"jobs": [f() for f in binary_build_functions],
|
||||
},
|
||||
"build": {
|
||||
"when": r"<< pipeline.parameters.run_build >>",
|
||||
"jobs": build_jobs,
|
||||
},
|
||||
"master_build": {
|
||||
"when": r"<< pipeline.parameters.run_master_build >>",
|
||||
"jobs": master_build_jobs,
|
||||
},
|
||||
}
|
||||
}
|
||||
if len(master_build_jobs) > 0:
|
||||
rc["workflows"]["master_build"] = {
|
||||
"when": r"<< pipeline.parameters.run_master_build >>",
|
||||
"jobs": master_build_jobs,
|
||||
}
|
||||
return rc
|
||||
|
||||
|
||||
# Order of this list matters to the generated config.yml.
|
||||
@ -180,14 +187,18 @@ YAML_SOURCES = [
|
||||
Header("Build parameters"),
|
||||
File("build-parameters/pytorch-build-params.yml"),
|
||||
File("build-parameters/binary-build-params.yml"),
|
||||
File("build-parameters/promote-build-params.yml"),
|
||||
Header("Job specs"),
|
||||
File("job-specs/pytorch-job-specs.yml"),
|
||||
File("job-specs/binary-job-specs.yml"),
|
||||
File("job-specs/job-specs-custom.yml"),
|
||||
File("job-specs/job-specs-promote.yml"),
|
||||
File("job-specs/binary_update_htmls.yml"),
|
||||
File("job-specs/binary-build-tests.yml"),
|
||||
File("job-specs/docker_jobs.yml"),
|
||||
Header("Workflows"),
|
||||
Treegen(gen_build_workflows_tree, 0),
|
||||
File("workflows/workflows-promote.yml"),
|
||||
]
|
||||
|
||||
|
||||
|
||||
@ -49,9 +49,8 @@ if [[ -n "${CIRCLE_PR_NUMBER:-}" ]]; then
|
||||
git reset --hard "$CIRCLE_SHA1"
|
||||
elif [[ -n "${CIRCLE_SHA1:-}" ]]; then
|
||||
# Scheduled workflows & "smoke" binary build on master on PR merges
|
||||
DEFAULT_BRANCH="$(git remote show $CIRCLE_REPOSITORY_URL | awk '/HEAD branch/ {print $NF}')"
|
||||
git reset --hard "$CIRCLE_SHA1"
|
||||
git checkout -q -B $DEFAULT_BRANCH
|
||||
git checkout -q -B master
|
||||
else
|
||||
echo "Can't tell what to checkout"
|
||||
exit 1
|
||||
@ -62,7 +61,7 @@ git --no-pager log --max-count 1
|
||||
popd
|
||||
|
||||
# Clone the Builder master repo
|
||||
retry git clone -q https://github.com/pytorch/builder.git -b release/1.13 "$BUILDER_ROOT"
|
||||
retry git clone -q https://github.com/pytorch/builder.git -b release/1.11 "$BUILDER_ROOT"
|
||||
pushd "$BUILDER_ROOT"
|
||||
echo "Using builder from "
|
||||
git --no-pager log --max-count 1
|
||||
|
||||
@ -1,19 +1,30 @@
|
||||
#!/bin/bash
|
||||
set -ex -o pipefail
|
||||
|
||||
if ! [ "$IOS_PLATFORM" == "SIMULATOR" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "DIR: $(pwd)"
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
cd ${PROJ_ROOT}/ios/TestApp
|
||||
# install fastlane
|
||||
sudo gem install bundler && bundle install
|
||||
# install certificates
|
||||
echo "${IOS_CERT_KEY_2022}" >> cert.txt
|
||||
base64 --decode cert.txt -o Certificates.p12
|
||||
rm cert.txt
|
||||
bundle exec fastlane install_root_cert
|
||||
bundle exec fastlane install_dev_cert
|
||||
# install the provisioning profile
|
||||
PROFILE=PyTorch_CI_2022.mobileprovision
|
||||
PROVISIONING_PROFILES=~/Library/MobileDevice/Provisioning\ Profiles
|
||||
mkdir -pv "${PROVISIONING_PROFILES}"
|
||||
cd "${PROVISIONING_PROFILES}"
|
||||
echo "${IOS_SIGN_KEY_2022}" >> cert.txt
|
||||
base64 --decode cert.txt -o ${PROFILE}
|
||||
rm cert.txt
|
||||
# run the ruby build script
|
||||
if ! [ -x "$(command -v xcodebuild)" ]; then
|
||||
echo 'Error: xcodebuild is not installed.'
|
||||
exit 1
|
||||
fi
|
||||
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM}
|
||||
PROFILE=PyTorch_CI_2022
|
||||
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM} -c ${PROFILE} -t ${IOS_DEV_TEAM_ID}
|
||||
|
||||
@ -33,7 +33,7 @@ fi
|
||||
cp ${PROJ_ROOT}/LICENSE ${ZIP_DIR}/
|
||||
# zip the library
|
||||
export DATE="$(date -u +%Y%m%d)"
|
||||
export IOS_NIGHTLY_BUILD_VERSION="1.13.0.${DATE}"
|
||||
export IOS_NIGHTLY_BUILD_VERSION="1.11.0.${DATE}"
|
||||
if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then
|
||||
# libtorch_lite_ios_nightly_1.11.0.20210810.zip
|
||||
ZIPFILE="libtorch_lite_ios_nightly_${IOS_NIGHTLY_BUILD_VERSION}.zip"
|
||||
|
||||
@ -26,7 +26,7 @@ else
|
||||
build_script='manywheel/build.sh'
|
||||
fi
|
||||
|
||||
if [[ "$CIRCLE_BRANCH" == "main" ]] || [[ "$CIRCLE_BRANCH" == "master" ]] || [[ "$CIRCLE_BRANCH" == release/* ]]; then
|
||||
if [[ "$CIRCLE_BRANCH" == "master" ]] || [[ "$CIRCLE_BRANCH" == release/* ]]; then
|
||||
export BUILD_DEBUG_INFO=1
|
||||
fi
|
||||
|
||||
|
||||
@ -53,7 +53,9 @@ if [[ "\$python_nodot" = *39* ]]; then
|
||||
NUMPY_PIN=">=1.20"
|
||||
fi
|
||||
|
||||
|
||||
if [[ "$DESIRED_CUDA" == "cu112" || "$DESIRED_CUDA" == "cu115" ]]; then
|
||||
EXTRA_CONDA_FLAGS="-c=conda-forge"
|
||||
fi
|
||||
|
||||
# Move debug wheels out of the the package dir so they don't get installed
|
||||
mkdir -p /tmp/debug_final_pkgs
|
||||
@ -65,8 +67,7 @@ mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to m
|
||||
# TODO there is duplicated and inconsistent test-python-env setup across this
|
||||
# file, builder/smoke_test.sh, and builder/run_tests.sh, and also in the
|
||||
# conda build scripts themselves. These should really be consolidated
|
||||
# Pick only one package of multiple available (which happens as result of workflow re-runs)
|
||||
pkg="/final_pkgs/\$(ls -1 /final_pkgs|sort|tail -1)"
|
||||
pkg="/final_pkgs/\$(ls /final_pkgs)"
|
||||
if [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
(
|
||||
# For some reason conda likes to re-activate the conda environment when attempting this install
|
||||
@ -86,14 +87,13 @@ if [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
|
||||
retry conda install -c pytorch -y cpuonly
|
||||
else
|
||||
|
||||
cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
|
||||
CUDA_PACKAGE="cudatoolkit"
|
||||
if [[ "$DESIRED_CUDA" == "cu116" || "$DESIRED_CUDA" == "cu117" ]]; then
|
||||
CUDA_PACKAGE="cuda"
|
||||
# DESIRED_CUDA is in format cu90 or cu102
|
||||
if [[ "${#DESIRED_CUDA}" == 4 ]]; then
|
||||
cu_ver="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3}"
|
||||
else
|
||||
cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
|
||||
fi
|
||||
|
||||
retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c pytorch "\${CUDA_PACKAGE}=\${cu_ver}"
|
||||
retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c pytorch "cudatoolkit=\${cu_ver}"
|
||||
fi
|
||||
conda install \${EXTRA_CONDA_FLAGS} -y "\$pkg" --offline
|
||||
)
|
||||
|
||||
@ -1,19 +1,28 @@
|
||||
#!/bin/bash
|
||||
set -eux -o pipefail
|
||||
|
||||
source "${BINARY_ENV_FILE:-/Users/distiller/project/env}"
|
||||
source "/Users/distiller/project/env"
|
||||
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"
|
||||
|
||||
if [[ -z "${GITHUB_ACTIONS:-}" ]]; then
|
||||
export PATH="${workdir:-${HOME}}/miniconda/bin:${PATH}"
|
||||
fi
|
||||
# For some reason `unbuffer` breaks if we change the PATH here, so we
|
||||
# write a script with the PATH change in it and unbuffer the whole
|
||||
# thing
|
||||
build_script="$workdir/build_script.sh"
|
||||
touch "$build_script"
|
||||
chmod +x "$build_script"
|
||||
|
||||
# Build
|
||||
export USE_PYTORCH_METAL_EXPORT=1
|
||||
export USE_COREML_DELEGATE=1
|
||||
cat >"$build_script" <<EOL
|
||||
export PATH="$workdir/miniconda/bin:$PATH"
|
||||
if [[ "$CIRCLE_BRANCH" == "nightly" ]]; then
|
||||
export USE_PYTORCH_METAL_EXPORT=1
|
||||
export USE_COREML_DELEGATE=1
|
||||
fi
|
||||
if [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
"${BUILDER_ROOT}/conda/build_pytorch.sh"
|
||||
"$workdir/builder/conda/build_pytorch.sh"
|
||||
else
|
||||
export TORCH_PACKAGE_NAME="$(echo $TORCH_PACKAGE_NAME | tr '-' '_')"
|
||||
"${BUILDER_ROOT}/wheel/build_wheel.sh"
|
||||
"$workdir/builder/wheel/build_wheel.sh"
|
||||
fi
|
||||
EOL
|
||||
unbuffer "$build_script" | ts
|
||||
|
||||
@ -5,7 +5,7 @@ export TZ=UTC
|
||||
tagged_version() {
|
||||
# Grabs version from either the env variable CIRCLE_TAG
|
||||
# or the pytorch git described version
|
||||
if [[ "$OSTYPE" == "msys" && -z "${GITHUB_ACTIONS:-}" ]]; then
|
||||
if [[ "$OSTYPE" == "msys" && -z "${IS_GHA:-}" ]]; then
|
||||
GIT_DIR="${workdir}/p/.git"
|
||||
else
|
||||
GIT_DIR="${workdir}/pytorch/.git"
|
||||
@ -23,12 +23,50 @@ tagged_version() {
|
||||
fi
|
||||
}
|
||||
|
||||
envfile=${BINARY_ENV_FILE:-/tmp/env}
|
||||
if [[ -n "${PYTORCH_ROOT}" ]]; then
|
||||
workdir=$(dirname "${PYTORCH_ROOT}")
|
||||
# These are only relevant for CircleCI
|
||||
# TODO: Remove these later once migrated fully to GHA
|
||||
if [[ -z ${IS_GHA:-} ]]; then
|
||||
# We need to write an envfile to persist these variables to following
|
||||
# steps, but the location of the envfile depends on the circleci executor
|
||||
if [[ "$(uname)" == Darwin ]]; then
|
||||
# macos executor (builds and tests)
|
||||
workdir="/Users/distiller/project"
|
||||
elif [[ "$OSTYPE" == "msys" ]]; then
|
||||
# windows executor (builds and tests)
|
||||
workdir="/c/w"
|
||||
elif [[ -d "/home/circleci/project" ]]; then
|
||||
# machine executor (binary tests)
|
||||
workdir="/home/circleci/project"
|
||||
else
|
||||
# docker executor (binary builds)
|
||||
workdir="/"
|
||||
fi
|
||||
envfile="$workdir/env"
|
||||
touch "$envfile"
|
||||
chmod +x "$envfile"
|
||||
|
||||
# Parse the BUILD_ENVIRONMENT to package type, python, and cuda
|
||||
configs=($BUILD_ENVIRONMENT)
|
||||
export PACKAGE_TYPE="${configs[0]}"
|
||||
export DESIRED_PYTHON="${configs[1]}"
|
||||
export DESIRED_CUDA="${configs[2]}"
|
||||
if [[ "${BUILD_FOR_SYSTEM:-}" == "windows" ]]; then
|
||||
export DESIRED_DEVTOOLSET=""
|
||||
export LIBTORCH_CONFIG="${configs[3]:-}"
|
||||
if [[ "$LIBTORCH_CONFIG" == 'debug' ]]; then
|
||||
export DEBUG=1
|
||||
fi
|
||||
else
|
||||
export DESIRED_DEVTOOLSET="${configs[3]:-}"
|
||||
fi
|
||||
else
|
||||
# docker executor (binary builds)
|
||||
workdir="/"
|
||||
envfile=${BINARY_ENV_FILE:-/tmp/env}
|
||||
if [[ -n "${PYTORCH_ROOT}" ]]; then
|
||||
workdir=$(dirname "${PYTORCH_ROOT}")
|
||||
else
|
||||
# docker executor (binary builds)
|
||||
workdir="/"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
|
||||
@ -53,13 +91,18 @@ if [[ ${DESIRED_CUDA} == "cpu" ]]; then
|
||||
USE_GOLD_LINKER="ON"
|
||||
fi
|
||||
|
||||
USE_WHOLE_CUDNN="OFF"
|
||||
# Link whole cuDNN for CUDA-11.1 to include fp16 fast kernels
|
||||
if [[ "$(uname)" == "Linux" && "${DESIRED_CUDA}" == "cu111" ]]; then
|
||||
USE_WHOLE_CUDNN="ON"
|
||||
fi
|
||||
|
||||
# Default to nightly, since that's where this normally uploads to
|
||||
PIP_UPLOAD_FOLDER='nightly/'
|
||||
# We put this here so that OVERRIDE_PACKAGE_VERSION below can read from it
|
||||
export DATE="$(date -u +%Y%m%d)"
|
||||
#TODO: We should be pulling semver version from the base version.txt
|
||||
BASE_BUILD_VERSION="1.13.0.dev$DATE"
|
||||
BASE_BUILD_VERSION="1.11.0.dev$DATE"
|
||||
# Change BASE_BUILD_VERSION to git tag when on a git tag
|
||||
# Use 'git -C' to make doubly sure we're in the correct directory for checking
|
||||
# the git tag
|
||||
@ -76,11 +119,6 @@ if [[ "$(uname)" == 'Darwin' ]] || [[ "$PACKAGE_TYPE" == conda ]]; then
|
||||
else
|
||||
export PYTORCH_BUILD_VERSION="${BASE_BUILD_VERSION}+$DESIRED_CUDA"
|
||||
fi
|
||||
|
||||
if [[ -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
|
||||
export PYTORCH_BUILD_VERSION="${PYTORCH_BUILD_VERSION}-with-pypi-cudnn"
|
||||
fi
|
||||
|
||||
export PYTORCH_BUILD_NUMBER=1
|
||||
|
||||
|
||||
@ -120,18 +158,14 @@ export DESIRED_PYTHON="${DESIRED_PYTHON:-}"
|
||||
export DESIRED_CUDA="$DESIRED_CUDA"
|
||||
export LIBTORCH_VARIANT="${LIBTORCH_VARIANT:-}"
|
||||
export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}"
|
||||
if [[ "${OSTYPE}" == "msys" ]]; then
|
||||
export DESIRED_DEVTOOLSET="${DESIRED_DEVTOOLSET:-}"
|
||||
if [[ "${BUILD_FOR_SYSTEM:-}" == "windows" ]]; then
|
||||
export LIBTORCH_CONFIG="${LIBTORCH_CONFIG:-}"
|
||||
if [[ "${LIBTORCH_CONFIG:-}" == 'debug' ]]; then
|
||||
export DEBUG=1
|
||||
fi
|
||||
export DESIRED_DEVTOOLSET=""
|
||||
else
|
||||
export DESIRED_DEVTOOLSET="${DESIRED_DEVTOOLSET:-}"
|
||||
export DEBUG="${DEBUG:-}"
|
||||
fi
|
||||
export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}"
|
||||
|
||||
export DATE="$DATE"
|
||||
export NIGHTLIES_DATE_PREAMBLE=1.13.0.dev
|
||||
export NIGHTLIES_DATE_PREAMBLE=1.11.0.dev
|
||||
export PYTORCH_BUILD_VERSION="$PYTORCH_BUILD_VERSION"
|
||||
export PYTORCH_BUILD_NUMBER="$PYTORCH_BUILD_NUMBER"
|
||||
export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
|
||||
@ -150,6 +184,7 @@ export DOCKER_IMAGE="$DOCKER_IMAGE"
|
||||
|
||||
export USE_GOLD_LINKER="${USE_GOLD_LINKER}"
|
||||
export USE_GLOO_WITH_OPENSSL="ON"
|
||||
export USE_WHOLE_CUDNN="${USE_WHOLE_CUDNN}"
|
||||
# =================== The above code will be executed inside Docker container ===================
|
||||
EOL
|
||||
|
||||
@ -167,7 +202,7 @@ if [[ "$(uname)" != Darwin ]]; then
|
||||
EOL
|
||||
fi
|
||||
|
||||
if [[ -z "${GITHUB_ACTIONS:-}" ]]; then
|
||||
if [[ -z "${IS_GHA:-}" ]]; then
|
||||
cat >>"$envfile" <<EOL
|
||||
export workdir="$workdir"
|
||||
export MAC_PACKAGE_WORK_DIR="$workdir"
|
||||
|
||||
@ -14,12 +14,6 @@ UPLOAD_CHANNEL=${UPLOAD_CHANNEL:-nightly}
|
||||
UPLOAD_SUBFOLDER=${UPLOAD_SUBFOLDER:-cpu}
|
||||
UPLOAD_BUCKET="s3://pytorch"
|
||||
BACKUP_BUCKET="s3://pytorch-backup"
|
||||
BUILD_NAME=${BUILD_NAME:-}
|
||||
|
||||
# this is temporary change to upload pypi-cudnn builds to separate folder
|
||||
if [[ ${BUILD_NAME} == *with-pypi-cudnn* ]]; then
|
||||
UPLOAD_SUBFOLDER="${UPLOAD_SUBFOLDER}_pypi_cudnn"
|
||||
fi
|
||||
|
||||
DRY_RUN=${DRY_RUN:-enabled}
|
||||
# Don't actually do work unless explicit
|
||||
@ -30,11 +24,6 @@ if [[ "${DRY_RUN}" = "disabled" ]]; then
|
||||
AWS_S3_CP="aws s3 cp"
|
||||
fi
|
||||
|
||||
# Sleep 2 minutes between retries for conda upload
|
||||
retry () {
|
||||
"$@" || (sleep 5m && "$@") || (sleep 5m && "$@") || (sleep 5m && "$@") || (sleep 5m && "$@")
|
||||
}
|
||||
|
||||
do_backup() {
|
||||
local backup_dir
|
||||
backup_dir=$1
|
||||
@ -48,14 +37,13 @@ do_backup() {
|
||||
conda_upload() {
|
||||
(
|
||||
set -x
|
||||
retry \
|
||||
${ANACONDA} \
|
||||
upload \
|
||||
${PKG_DIR}/*.tar.bz2 \
|
||||
-u "pytorch-${UPLOAD_CHANNEL}" \
|
||||
--label main \
|
||||
--no-progress \
|
||||
--force
|
||||
upload \
|
||||
${PKG_DIR}/*.tar.bz2 \
|
||||
-u "pytorch-${UPLOAD_CHANNEL}" \
|
||||
--label main \
|
||||
--no-progress \
|
||||
--force
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@ -6,18 +6,16 @@ mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"
|
||||
|
||||
export CUDA_VERSION="${DESIRED_CUDA/cu/}"
|
||||
export USE_SCCACHE=1
|
||||
export SCCACHE_BUCKET=ossci-compiler-cache
|
||||
export SCCACHE_IGNORE_SERVER_IO_ERROR=1
|
||||
export SCCACHE_BUCKET=ossci-compiler-cache-windows
|
||||
export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"
|
||||
export VC_YEAR=2019
|
||||
|
||||
if [[ "${DESIRED_CUDA}" == *"cu11"* ]]; then
|
||||
export BUILD_SPLIT_CUDA=ON
|
||||
fi
|
||||
|
||||
|
||||
echo "Free Space for CUDA DEBUG BUILD"
|
||||
if [[ "${CIRCLECI:-}" == 'true' ]]; then
|
||||
export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"
|
||||
if [[ -d "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community" ]]; then
|
||||
rm -rf "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community"
|
||||
fi
|
||||
@ -72,7 +70,6 @@ pushd "$BUILDER_ROOT"
|
||||
if [[ "$PACKAGE_TYPE" == 'conda' ]]; then
|
||||
./windows/internal/build_conda.bat
|
||||
elif [[ "$PACKAGE_TYPE" == 'wheel' || "$PACKAGE_TYPE" == 'libtorch' ]]; then
|
||||
export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"
|
||||
./windows/internal/build_wheels.bat
|
||||
fi
|
||||
|
||||
|
||||
@ -78,7 +78,7 @@ if [[ "${BUILD_ENVIRONMENT}" == *-gradle-build-only-x86_32* ]]; then
|
||||
GRADLE_PARAMS+=" -PABI_FILTERS=x86"
|
||||
fi
|
||||
|
||||
if [ -n "${GRADLE_OFFLINE:-}" ]; then
|
||||
if [ -n "{GRADLE_OFFLINE:-}" ]; then
|
||||
GRADLE_PARAMS+=" --offline"
|
||||
fi
|
||||
|
||||
|
||||
@ -34,9 +34,9 @@ echo "error: cpp_doc_push_script.sh: install_path (arg1) not specified"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
is_main_doc=false
|
||||
is_master_doc=false
|
||||
if [ "$version" == "master" ]; then
|
||||
is_main_doc=true
|
||||
is_master_doc=true
|
||||
fi
|
||||
|
||||
echo "install_path: $install_path version: $version"
|
||||
@ -51,10 +51,12 @@ git clone https://github.com/pytorch/cppdocs
|
||||
|
||||
set -ex
|
||||
|
||||
sudo apt-get -y install doxygen
|
||||
|
||||
# Generate ATen files
|
||||
pushd "${pt_checkout}"
|
||||
pip install -r requirements.txt
|
||||
time python -m torchgen.gen \
|
||||
time python -m tools.codegen.gen \
|
||||
-s aten/src/ATen \
|
||||
-d build/aten/src/ATen
|
||||
|
||||
@ -64,7 +66,7 @@ cp torch/_utils_internal.py tools/shared
|
||||
# Generate PyTorch files
|
||||
time python tools/setup_helpers/generate_code.py \
|
||||
--native-functions-path aten/src/ATen/native/native_functions.yaml \
|
||||
--tags-path aten/src/ATen/native/tags.yaml
|
||||
--nn-path aten/src/
|
||||
|
||||
# Build the docs
|
||||
pushd docs/cpp
|
||||
@ -98,9 +100,6 @@ git commit -m "Generate C++ docs from pytorch/pytorch@${GITHUB_SHA}" || true
|
||||
git status
|
||||
|
||||
if [[ "${WITH_PUSH:-}" == true ]]; then
|
||||
# push to a temp branch first to trigger CLA check and satisfy branch protections
|
||||
git push -u origin HEAD:pytorchbot/temp-branch-cpp -f
|
||||
sleep 30
|
||||
git push -u origin
|
||||
fi
|
||||
|
||||
|
||||
@ -1,47 +0,0 @@
|
||||
#!/bin/bash
|
||||
# =================== The following code **should** be executed inside Docker container ===================
|
||||
|
||||
# Install dependencies
|
||||
sudo apt-get -y update
|
||||
sudo apt-get -y install expect-dev
|
||||
|
||||
# This is where the local pytorch install in the docker image is located
|
||||
pt_checkout="/var/lib/jenkins/workspace"
|
||||
source "$pt_checkout/.jenkins/pytorch/common_utils.sh"
|
||||
echo "functorch_doc_push_script.sh: Invoked with $*"
|
||||
|
||||
set -ex
|
||||
|
||||
version=${DOCS_VERSION:-nightly}
|
||||
echo "version: $version"
|
||||
|
||||
# Build functorch docs
|
||||
pushd $pt_checkout/functorch/docs
|
||||
pip -q install -r requirements.txt
|
||||
make html
|
||||
popd
|
||||
|
||||
git clone https://github.com/pytorch/functorch -b gh-pages --depth 1 functorch_ghpages
|
||||
pushd functorch_ghpages
|
||||
|
||||
if [ $version == "master" ]; then
|
||||
version=nightly
|
||||
fi
|
||||
|
||||
git rm -rf "$version" || true
|
||||
mv "$pt_checkout/functorch/docs/build/html" "$version"
|
||||
|
||||
git add "$version" || true
|
||||
git status
|
||||
git config user.email "soumith+bot@pytorch.org"
|
||||
git config user.name "pytorchbot"
|
||||
# If there aren't changes, don't make a commit; push is no-op
|
||||
git commit -m "Generate Python docs from pytorch/pytorch@${GITHUB_SHA}" || true
|
||||
git status
|
||||
|
||||
if [[ "${WITH_PUSH:-}" == true ]]; then
|
||||
git push -u origin gh-pages
|
||||
fi
|
||||
|
||||
popd
|
||||
# =================== The above code **should** be executed inside Docker container ===================
|
||||
@ -37,9 +37,9 @@ echo "error: python_doc_push_script.sh: install_path (arg1) not specified"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
is_main_doc=false
|
||||
is_master_doc=false
|
||||
if [ "$version" == "master" ]; then
|
||||
is_main_doc=true
|
||||
is_master_doc=true
|
||||
fi
|
||||
|
||||
# Argument 3: The branch to push to. Usually is "site"
|
||||
@ -86,7 +86,7 @@ pushd docs
|
||||
|
||||
# Build the docs
|
||||
pip -q install -r requirements.txt
|
||||
if [ "$is_main_doc" = true ]; then
|
||||
if [ "$is_master_doc" = true ]; then
|
||||
build_docs html
|
||||
[ $? -eq 0 ] || exit $?
|
||||
make coverage
|
||||
@ -135,9 +135,6 @@ git commit -m "Generate Python docs from pytorch/pytorch@${GITHUB_SHA}" || true
|
||||
git status
|
||||
|
||||
if [[ "${WITH_PUSH:-}" == true ]]; then
|
||||
# push to a temp branch first to trigger CLA check and satisfy branch protections
|
||||
git push -u origin HEAD:pytorchbot/temp-branch-py -f
|
||||
sleep 30
|
||||
git push -u origin "${branch}"
|
||||
fi
|
||||
|
||||
|
||||
@ -32,7 +32,7 @@ if ! command -v aws >/dev/null; then
|
||||
fi
|
||||
|
||||
if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
|
||||
DRIVER_FN="NVIDIA-Linux-x86_64-515.57.run"
|
||||
DRIVER_FN="NVIDIA-Linux-x86_64-495.44.run"
|
||||
wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
|
||||
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
|
||||
nvidia-smi
|
||||
@ -66,6 +66,7 @@ add_to_env_file() {
|
||||
esac
|
||||
}
|
||||
|
||||
add_to_env_file IN_CI 1
|
||||
add_to_env_file CI_MASTER "${CI_MASTER:-}"
|
||||
add_to_env_file COMMIT_SOURCE "${CIRCLE_BRANCH:-}"
|
||||
add_to_env_file BUILD_ENVIRONMENT "${BUILD_ENVIRONMENT}"
|
||||
|
||||
@ -11,7 +11,7 @@ AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/"
|
||||
AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRET", "")
|
||||
PIPELINE_ID = "911"
|
||||
PROJECT_ID = "0628bce4-2d33-499e-bac5-530e12db160f"
|
||||
TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "main")
|
||||
TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "master")
|
||||
TARGET_COMMIT = os.environ.get("CIRCLE_SHA1", "")
|
||||
|
||||
build_base_url = AZURE_PIPELINE_BASE_URL + "_apis/build/builds?api-version=6.0"
|
||||
|
||||
@ -2,23 +2,26 @@
|
||||
set -eux -o pipefail
|
||||
|
||||
case ${CUDA_VERSION} in
|
||||
10.1)
|
||||
cuda_installer_name="cuda_10.1.243_426.00_win10"
|
||||
cuda_install_packages="nvcc_10.1 cuobjdump_10.1 nvprune_10.1 cupti_10.1 cublas_10.1 cublas_dev_10.1 cudart_10.1 cufft_10.1 cufft_dev_10.1 curand_10.1 curand_dev_10.1 cusolver_10.1 cusolver_dev_10.1 cusparse_10.1 cusparse_dev_10.1 nvgraph_10.1 nvgraph_dev_10.1 npp_10.1 npp_dev_10.1 nvrtc_10.1 nvrtc_dev_10.1 nvml_dev_10.1"
|
||||
;;
|
||||
10.2)
|
||||
cuda_installer_name="cuda_10.2.89_441.22_win10"
|
||||
cuda_install_packages="nvcc_10.2 cuobjdump_10.2 nvprune_10.2 cupti_10.2 cublas_10.2 cublas_dev_10.2 cudart_10.2 cufft_10.2 cufft_dev_10.2 curand_10.2 curand_dev_10.2 cusolver_10.2 cusolver_dev_10.2 cusparse_10.2 cusparse_dev_10.2 nvgraph_10.2 nvgraph_dev_10.2 npp_10.2 npp_dev_10.2 nvrtc_10.2 nvrtc_dev_10.2 nvml_dev_10.2"
|
||||
;;
|
||||
11.1)
|
||||
cuda_installer_name="cuda_11.1.1_456.81_win10"
|
||||
cuda_install_packages="nvcc_11.1 cuobjdump_11.1 nvprune_11.1 nvprof_11.1 cupti_11.1 cublas_11.1 cublas_dev_11.1 cudart_11.1 cufft_11.1 cufft_dev_11.1 curand_11.1 curand_dev_11.1 cusolver_11.1 cusolver_dev_11.1 cusparse_11.1 cusparse_dev_11.1 npp_11.1 npp_dev_11.1 nvrtc_11.1 nvrtc_dev_11.1 nvml_dev_11.1"
|
||||
;;
|
||||
11.3)
|
||||
cuda_installer_name="cuda_11.3.0_465.89_win10"
|
||||
cuda_install_packages="thrust_11.3 nvcc_11.3 cuobjdump_11.3 nvprune_11.3 nvprof_11.3 cupti_11.3 cublas_11.3 cublas_dev_11.3 cudart_11.3 cufft_11.3 cufft_dev_11.3 curand_11.3 curand_dev_11.3 cusolver_11.3 cusolver_dev_11.3 cusparse_11.3 cusparse_dev_11.3 npp_11.3 npp_dev_11.3 nvrtc_11.3 nvrtc_dev_11.3 nvml_dev_11.3"
|
||||
;;
|
||||
11.6)
|
||||
cuda_installer_name="cuda_11.6.0_511.23_windows"
|
||||
cuda_install_packages="thrust_11.6 nvcc_11.6 cuobjdump_11.6 nvprune_11.6 nvprof_11.6 cupti_11.6 cublas_11.6 cublas_dev_11.6 cudart_11.6 cufft_11.6 cufft_dev_11.6 curand_11.6 curand_dev_11.6 cusolver_11.6 cusolver_dev_11.6 cusparse_11.6 cusparse_dev_11.6 npp_11.6 npp_dev_11.6 nvrtc_11.6 nvrtc_dev_11.6 nvml_dev_11.6"
|
||||
11.5)
|
||||
cuda_installer_name="cuda_11.5.0_496.13_win10"
|
||||
cuda_install_packages="thrust_11.5 nvcc_11.5 cuobjdump_11.5 nvprune_11.5 nvprof_11.5 cupti_11.5 cublas_11.5 cublas_dev_11.5 cudart_11.5 cufft_11.5 cufft_dev_11.5 curand_11.5 curand_dev_11.5 cusolver_11.5 cusolver_dev_11.5 cusparse_11.5 cusparse_dev_11.5 npp_11.5 npp_dev_11.5 nvrtc_11.5 nvrtc_dev_11.5 nvml_dev_11.5"
|
||||
;;
|
||||
11.7)
|
||||
cuda_installer_name="cuda_11.7.0_516.01_windows"
|
||||
cuda_install_packages="thrust_11.7 nvcc_11.7 cuobjdump_11.7 nvprune_11.7 nvprof_11.7 cupti_11.7 cublas_11.7 cublas_dev_11.7 cudart_11.7 cufft_11.7 cufft_dev_11.7 curand_11.7 curand_dev_11.7 cusolver_11.7 cusolver_dev_11.7 cusparse_11.7 cusparse_dev_11.7 npp_11.7 npp_dev_11.7 nvrtc_11.7 nvrtc_dev_11.7 nvml_dev_11.7"
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "CUDA_VERSION $CUDA_VERSION is not supported yet"
|
||||
exit 1
|
||||
|
||||
@ -5,20 +5,22 @@ set -eux -o pipefail
|
||||
windows_s3_link="https://ossci-windows.s3.amazonaws.com"
|
||||
|
||||
case ${CUDA_VERSION} in
|
||||
10.1)
|
||||
# This is typically blank but for CUDA 10* it'll be set to 10
|
||||
cudnn_file_name="cudnn-${CUDA_VERSION}-windows10-x64-v7.6.4.38"
|
||||
;;
|
||||
10.2)
|
||||
cudnn_file_name="cudnn-${CUDA_VERSION}-windows10-x64-v7.6.5.32"
|
||||
;;
|
||||
11.1)
|
||||
cudnn_file_name="cudnn-${CUDA_VERSION}-windows-x64-v8.0.5.39"
|
||||
;;
|
||||
11.3)
|
||||
# Use cudnn8.3 with hard-coded cuda11.3 version
|
||||
cudnn_file_name="cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive"
|
||||
cudnn_file_name="cudnn-${CUDA_VERSION}-windows-x64-v8.2.0.53"
|
||||
;;
|
||||
11.6)
|
||||
# Use cudnn8.3 with hard-coded cuda11.5 version
|
||||
cudnn_file_name="cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive"
|
||||
;;
|
||||
11.7)
|
||||
# Use cudnn8.3 with hard-coded cuda11.5 version
|
||||
cudnn_file_name="cudnn-windows-x86_64-8.5.0.96_cuda11-archive"
|
||||
11.5)
|
||||
# Since cudnn 8.3 the filename have changed
|
||||
cudnn_file_name="cudnn-windows-x86_64-8.3.2.44_cuda${CUDA_VERSION}-archive"
|
||||
;;
|
||||
*)
|
||||
echo "CUDA_VERSION: ${CUDA_VERSION} not supported yet"
|
||||
|
||||
@ -62,4 +62,5 @@ binary_windows_params: &binary_windows_params
|
||||
default: "windows-xlarge-cpu-with-nvidia-cuda"
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: << parameters.build_environment >>
|
||||
BUILD_FOR_SYSTEM: windows
|
||||
JOB_EXECUTOR: <<parameters.executor>>
|
||||
|
||||
@ -0,0 +1,14 @@
|
||||
|
||||
promote_common: &promote_common
|
||||
docker:
|
||||
- image: pytorch/release
|
||||
parameters:
|
||||
package_name:
|
||||
description: "package name to promote"
|
||||
type: string
|
||||
default: ""
|
||||
environment:
|
||||
PACKAGE_NAME: << parameters.package_name >>
|
||||
ANACONDA_API_TOKEN: ${CONDA_PYTORCHBOT_TOKEN}
|
||||
AWS_ACCESS_KEY_ID: ${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}
|
||||
AWS_SECRET_ACCESS_KEY: ${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}
|
||||
@ -132,3 +132,43 @@ commands:
|
||||
else
|
||||
echo "This is not a pull request, skipping..."
|
||||
fi
|
||||
|
||||
upload_binary_size_for_android_build:
|
||||
description: "Upload binary size data for Android build"
|
||||
parameters:
|
||||
build_type:
|
||||
type: string
|
||||
default: ""
|
||||
artifacts:
|
||||
type: string
|
||||
default: ""
|
||||
steps:
|
||||
- run:
|
||||
name: "Binary Size - Install Dependencies"
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
retry () {
|
||||
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
|
||||
}
|
||||
retry pip3 install requests
|
||||
- run:
|
||||
name: "Binary Size - Untar Artifacts"
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
# The artifact file is created inside docker container, which contains the result binaries.
|
||||
# Now unpackage it into the project folder. The subsequent script will scan project folder
|
||||
# to locate result binaries and report their sizes.
|
||||
# If artifact file is not provided it assumes that the project folder has been mounted in
|
||||
# the docker during build and already contains the result binaries, so this step can be skipped.
|
||||
export ARTIFACTS="<< parameters.artifacts >>"
|
||||
if [ -n "${ARTIFACTS}" ]; then
|
||||
tar xf "${ARTIFACTS}" -C ~/project
|
||||
fi
|
||||
- run:
|
||||
name: "Binary Size - Upload << parameters.build_type >>"
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
cd ~/project
|
||||
export ANDROID_BUILD_TYPE="<< parameters.build_type >>"
|
||||
export COMMIT_TIME=$(git log --max-count=1 --format=%ct || echo 0)
|
||||
python3 -m tools.stats.upload_binary_size_to_scuba android
|
||||
|
||||
@ -3,12 +3,12 @@
|
||||
# binary_linux_libtorch_3.6m_cpu_test:
|
||||
# environment:
|
||||
# BUILD_ENVIRONMENT: "libtorch 3.6m cpu"
|
||||
# resource_class: gpu.nvidia.small
|
||||
# resource_class: gpu.medium
|
||||
# <<: *binary_linux_test
|
||||
#
|
||||
# binary_linux_libtorch_3.6m_cu90_test:
|
||||
# environment:
|
||||
# BUILD_ENVIRONMENT: "libtorch 3.6m cu90"
|
||||
# resource_class: gpu.nvidia.small
|
||||
# resource_class: gpu.medium
|
||||
# <<: *binary_linux_test
|
||||
#
|
||||
|
||||
@ -1,4 +1,242 @@
|
||||
jobs:
|
||||
binary_linux_build:
|
||||
<<: *binary_linux_build_params
|
||||
steps:
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
source "/pytorch/.circleci/scripts/binary_linux_build.sh"
|
||||
# Preserve build log
|
||||
if [ -f /pytorch/build/.ninja_log ]; then
|
||||
cp /pytorch/build/.ninja_log /final_pkgs
|
||||
fi
|
||||
- run:
|
||||
name: Output binary sizes
|
||||
no_output_timeout: "1m"
|
||||
command: |
|
||||
ls -lah /final_pkgs
|
||||
- run:
|
||||
name: upload build & binary data
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
source /env
|
||||
cd /pytorch && export COMMIT_TIME=$(git log --max-count=1 --format=%ct || echo 0)
|
||||
python3 -mpip install requests && \
|
||||
SCRIBE_GRAPHQL_ACCESS_TOKEN=${SCRIBE_GRAPHQL_ACCESS_TOKEN} \
|
||||
python3 -m tools.stats.upload_binary_size_to_scuba || exit 0
|
||||
- persist_to_workspace:
|
||||
root: /
|
||||
paths: final_pkgs
|
||||
|
||||
- store_artifacts:
|
||||
path: /final_pkgs
|
||||
|
||||
# This should really just be another step of the binary_linux_build job above.
|
||||
# This isn't possible right now b/c the build job uses the docker executor
|
||||
# (otherwise they'd be really really slow) but this one uses the macine
|
||||
# executor (b/c we have to run the docker with --runtime=nvidia and we can't do
|
||||
# that on the docker executor)
|
||||
binary_linux_test:
|
||||
<<: *binary_linux_test_upload_params
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: /home/circleci/project
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- run:
|
||||
name: Prepare test code
|
||||
no_output_timeout: "1h"
|
||||
command: .circleci/scripts/binary_linux_test.sh
|
||||
- run:
|
||||
<<: *binary_run_in_docker
|
||||
|
||||
binary_upload:
|
||||
parameters:
|
||||
package_type:
|
||||
type: string
|
||||
description: "What type of package we are uploading (eg. wheel, libtorch, conda)"
|
||||
default: "wheel"
|
||||
upload_subfolder:
|
||||
type: string
|
||||
description: "What subfolder to put our package into (eg. cpu, cudaX.Y, etc.)"
|
||||
default: "cpu"
|
||||
docker:
|
||||
- image: continuumio/miniconda3
|
||||
environment:
|
||||
- DRY_RUN: disabled
|
||||
- PACKAGE_TYPE: "<< parameters.package_type >>"
|
||||
- UPLOAD_SUBFOLDER: "<< parameters.upload_subfolder >>"
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- checkout
|
||||
- designate_upload_channel
|
||||
- run:
|
||||
name: Install dependencies
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
conda install -yq anaconda-client
|
||||
pip install -q awscli
|
||||
- run:
|
||||
name: Do upload
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" \
|
||||
AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" \
|
||||
ANACONDA_API_TOKEN="${CONDA_PYTORCHBOT_TOKEN}" \
|
||||
.circleci/scripts/binary_upload.sh
|
||||
|
||||
# Nighlty build smoke tests defaults
|
||||
# These are the second-round smoke tests. These make sure that the binaries are
|
||||
# correct from a user perspective, testing that they exist from the cloud are
|
||||
# are runnable. Note that the pytorch repo is never cloned into these jobs
|
||||
##############################################################################
|
||||
smoke_linux_test:
|
||||
<<: *binary_linux_test_upload_params
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -ex
|
||||
cat >/home/circleci/project/ci_test_script.sh \<<EOL
|
||||
# The following code will be executed inside Docker container
|
||||
set -eux -o pipefail
|
||||
/builder/smoke_test.sh
|
||||
# The above code will be executed inside Docker container
|
||||
EOL
|
||||
- run:
|
||||
<<: *binary_run_in_docker
|
||||
|
||||
smoke_mac_test:
|
||||
<<: *binary_linux_test_upload_params
|
||||
macos:
|
||||
xcode: "12.0"
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- brew_update
|
||||
- run:
|
||||
<<: *binary_install_miniconda
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -ex
|
||||
source "/Users/distiller/project/env"
|
||||
export "PATH=$workdir/miniconda/bin:$PATH"
|
||||
# TODO unbuffer and ts this, but it breaks cause miniconda overwrites
|
||||
# tclsh. But unbuffer and ts aren't that important so they're just
|
||||
# disabled for now
|
||||
./builder/smoke_test.sh
|
||||
|
||||
binary_mac_build:
|
||||
<<: *binary_mac_params
|
||||
macos:
|
||||
xcode: "12.0"
|
||||
resource_class: "large"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- checkout
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- brew_update
|
||||
- run:
|
||||
<<: *binary_install_miniconda
|
||||
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "90m"
|
||||
command: |
|
||||
# Do not set -u here; there is some problem with CircleCI
|
||||
# variable expansion with PROMPT_COMMAND
|
||||
set -ex -o pipefail
|
||||
script="/Users/distiller/project/pytorch/.circleci/scripts/binary_macos_build.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
# Do not set -u here; there is some problem with CircleCI
|
||||
# variable expansion with PROMPT_COMMAND
|
||||
set -ex -o pipefail
|
||||
script="/Users/distiller/project/pytorch/.circleci/scripts/binary_macos_test.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
|
||||
- persist_to_workspace:
|
||||
root: /Users/distiller/project
|
||||
paths: final_pkgs
|
||||
|
||||
- store_artifacts:
|
||||
path: /Users/distiller/project/final_pkgs
|
||||
|
||||
binary_macos_arm64_build:
|
||||
<<: *binary_mac_params
|
||||
macos:
|
||||
xcode: "12.3.0"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- checkout
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- brew_update
|
||||
- run:
|
||||
<<: *binary_install_miniconda
|
||||
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "90m"
|
||||
command: |
|
||||
# Do not set -u here; there is some problem with CircleCI
|
||||
# variable expansion with PROMPT_COMMAND
|
||||
set -ex -o pipefail
|
||||
export CROSS_COMPILE_ARM64=1
|
||||
script="/Users/distiller/project/pytorch/.circleci/scripts/binary_macos_build.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
|
||||
- persist_to_workspace:
|
||||
root: /Users/distiller/project
|
||||
paths: final_pkgs
|
||||
|
||||
- store_artifacts:
|
||||
path: /Users/distiller/project/final_pkgs
|
||||
|
||||
|
||||
binary_ios_build:
|
||||
<<: *pytorch_ios_params
|
||||
macos:
|
||||
@ -43,6 +281,90 @@ jobs:
|
||||
cat "$script"
|
||||
source "$script"
|
||||
|
||||
binary_windows_build:
|
||||
<<: *binary_windows_params
|
||||
parameters:
|
||||
build_environment:
|
||||
type: string
|
||||
default: ""
|
||||
executor:
|
||||
type: string
|
||||
default: "windows-xlarge-cpu-with-nvidia-cuda"
|
||||
executor: <<parameters.executor>>
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- checkout
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -eux -o pipefail
|
||||
script="/c/w/p/.circleci/scripts/binary_windows_build.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
- persist_to_workspace:
|
||||
root: "C:/w"
|
||||
paths: final_pkgs
|
||||
- store_artifacts:
|
||||
path: C:/w/final_pkgs
|
||||
|
||||
binary_windows_test:
|
||||
<<: *binary_windows_params
|
||||
parameters:
|
||||
build_environment:
|
||||
type: string
|
||||
default: ""
|
||||
executor:
|
||||
type: string
|
||||
default: "windows-medium-cpu-with-nvidia-cuda"
|
||||
executor: <<parameters.executor>>
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: c:/users/circleci/project
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -eux -o pipefail
|
||||
script="/c/w/p/.circleci/scripts/binary_windows_test.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
|
||||
smoke_windows_test:
|
||||
<<: *binary_windows_params
|
||||
parameters:
|
||||
build_environment:
|
||||
type: string
|
||||
default: ""
|
||||
executor:
|
||||
type: string
|
||||
default: "windows-medium-cpu-with-nvidia-cuda"
|
||||
executor: <<parameters.executor>>
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
<<: *binary_checkout
|
||||
- run:
|
||||
<<: *binary_populate_env
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -eux -o pipefail
|
||||
export TEST_NIGHTLY_PACKAGE=1
|
||||
script="/c/w/p/.circleci/scripts/binary_windows_test.sh"
|
||||
cat "$script"
|
||||
source "$script"
|
||||
|
||||
anaconda_prune:
|
||||
parameters:
|
||||
packages:
|
||||
|
||||
@ -5,7 +5,7 @@
|
||||
parameters:
|
||||
branch:
|
||||
type: string
|
||||
default: "main"
|
||||
default: "master"
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
@ -24,6 +24,95 @@
|
||||
pushd /tmp/workspace
|
||||
git push -u origin "<< parameters.branch >>"
|
||||
|
||||
pytorch_python_doc_build:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-python-doc-push
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.7-gcc5.4"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
name: Doc Build and Push
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -ex
|
||||
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}:build-${DOCKER_TAG}-${CIRCLE_SHA1}
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
# turn v1.12.0rc3 into 1.12
|
||||
tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9]*\.[0-9]*\).*/\1/')
|
||||
target=${tag:-master}
|
||||
echo "building for ${target}"
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
|
||||
export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && '"export CIRCLE_SHA1='$CIRCLE_SHA1'"' && . ./.circleci/scripts/python_doc_push_script.sh docs/'$target' '$target' site") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
mkdir -p ~/workspace/build_artifacts
|
||||
docker cp $id:/var/lib/jenkins/workspace/pytorch.github.io/docs/master ~/workspace/build_artifacts
|
||||
docker cp $id:/var/lib/jenkins/workspace/pytorch.github.io /tmp/workspace
|
||||
|
||||
# Save the docs build so we can debug any problems
|
||||
export DEBUG_COMMIT_DOCKER_IMAGE=${COMMIT_DOCKER_IMAGE}-debug
|
||||
docker commit "$id" ${DEBUG_COMMIT_DOCKER_IMAGE}
|
||||
time docker push ${DEBUG_COMMIT_DOCKER_IMAGE}
|
||||
- persist_to_workspace:
|
||||
root: /tmp/workspace
|
||||
paths:
|
||||
- .
|
||||
- store_artifacts:
|
||||
path: ~/workspace/build_artifacts/master
|
||||
destination: docs
|
||||
|
||||
pytorch_cpp_doc_build:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-cpp-doc-push
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.7-gcc5.4"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
name: Doc Build and Push
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -ex
|
||||
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}:build-${DOCKER_TAG}-${CIRCLE_SHA1}
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
# turn v1.12.0rc3 into 1.12
|
||||
tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9]*\.[0-9]*\).*/\1/')
|
||||
target=${tag:-master}
|
||||
echo "building for ${target}"
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
|
||||
export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && '"export CIRCLE_SHA1='$CIRCLE_SHA1'"' && . ./.circleci/scripts/cpp_doc_push_script.sh docs/"$target" master") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
mkdir -p ~/workspace/build_artifacts
|
||||
docker cp $id:/var/lib/jenkins/workspace/cppdocs/ /tmp/workspace
|
||||
|
||||
# Save the docs build so we can debug any problems
|
||||
export DEBUG_COMMIT_DOCKER_IMAGE=${COMMIT_DOCKER_IMAGE}-debug
|
||||
docker commit "$id" ${DEBUG_COMMIT_DOCKER_IMAGE}
|
||||
time docker push ${DEBUG_COMMIT_DOCKER_IMAGE}
|
||||
|
||||
- persist_to_workspace:
|
||||
root: /tmp/workspace
|
||||
paths:
|
||||
- .
|
||||
|
||||
pytorch_macos_10_15_py3_build:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-macos-10.15-py3-arm64-build
|
||||
@ -37,6 +126,7 @@
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
export IN_CI=1
|
||||
export CROSS_COMPILE_ARM64=1
|
||||
export JOB_BASE_NAME=$CIRCLE_JOB
|
||||
|
||||
@ -74,6 +164,7 @@
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
export IN_CI=1
|
||||
export JOB_BASE_NAME=$CIRCLE_JOB
|
||||
|
||||
# Install sccache
|
||||
@ -95,198 +186,6 @@
|
||||
paths:
|
||||
- miniconda3
|
||||
|
||||
mac_build:
|
||||
parameters:
|
||||
build-environment:
|
||||
type: string
|
||||
description: Top-level label for what's being built/tested.
|
||||
xcode-version:
|
||||
type: string
|
||||
default: "13.3.1"
|
||||
description: What xcode version to build with.
|
||||
build-generates-artifacts:
|
||||
type: boolean
|
||||
default: true
|
||||
description: if the build generates build artifacts
|
||||
python-version:
|
||||
type: string
|
||||
default: "3.8"
|
||||
macos:
|
||||
xcode: << parameters.xcode-version >>
|
||||
resource_class: medium
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: << parameters.build-environment >>
|
||||
AWS_REGION: us-east-1
|
||||
steps:
|
||||
|
||||
- checkout
|
||||
- run_brew_for_macos_build
|
||||
|
||||
- run:
|
||||
name: Install sccache
|
||||
command: |
|
||||
sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
|
||||
sudo chmod +x /usr/local/bin/sccache
|
||||
echo "export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${BASH_ENV}"
|
||||
echo "export SCCACHE_S3_KEY_PREFIX=${GITHUB_WORKFLOW}" >> "${BASH_ENV}"
|
||||
|
||||
set +x
|
||||
echo "export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}" >> "${BASH_ENV}"
|
||||
echo "export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}" >> "${BASH_ENV}"
|
||||
set -x
|
||||
|
||||
- run:
|
||||
name: Get workflow job id
|
||||
command: |
|
||||
echo "export OUR_GITHUB_JOB_ID=${CIRCLE_WORKFLOW_JOB_ID}" >> "${BASH_ENV}"
|
||||
|
||||
- run:
|
||||
name: Build
|
||||
command: |
|
||||
set -x
|
||||
|
||||
git submodule sync
|
||||
git submodule update --init --recursive --depth 1 --jobs 0
|
||||
|
||||
export PATH="/usr/local/bin:$PATH"
|
||||
export WORKSPACE_DIR="${HOME}/workspace"
|
||||
mkdir -p "${WORKSPACE_DIR}"
|
||||
MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-py38_4.12.0-MacOSX-x86_64.sh"
|
||||
if [ << parameters.python-version >> == 3.9.12 ]; then
|
||||
MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-py39_4.12.0-MacOSX-x86_64.sh"
|
||||
fi
|
||||
|
||||
# If a local installation of conda doesn't exist, we download and install conda
|
||||
if [ ! -d "${WORKSPACE_DIR}/miniconda3" ]; then
|
||||
mkdir -p "${WORKSPACE_DIR}"
|
||||
curl --retry 3 ${MINICONDA_URL} -o "${WORKSPACE_DIR}"/miniconda3.sh
|
||||
bash "${WORKSPACE_DIR}"/miniconda3.sh -b -p "${WORKSPACE_DIR}"/miniconda3
|
||||
fi
|
||||
export PATH="${WORKSPACE_DIR}/miniconda3/bin:$PATH"
|
||||
# shellcheck disable=SC1091
|
||||
source "${WORKSPACE_DIR}"/miniconda3/bin/activate
|
||||
|
||||
brew link --force libomp
|
||||
|
||||
echo "export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname "$(which conda)")/../"}" >> "${BASH_ENV}"
|
||||
.jenkins/pytorch/macos-build.sh
|
||||
|
||||
- when:
|
||||
condition: << parameters.build-generates-artifacts >>
|
||||
steps:
|
||||
- run:
|
||||
name: Archive artifacts into zip
|
||||
command: |
|
||||
zip -1 -r artifacts.zip dist/ build/.ninja_log build/compile_commands.json .pytorch-test-times.json
|
||||
cp artifacts.zip /Users/distiller/workspace
|
||||
|
||||
- persist_to_workspace:
|
||||
root: /Users/distiller/workspace/
|
||||
paths:
|
||||
- miniconda3
|
||||
- artifacts.zip
|
||||
|
||||
- store_artifacts:
|
||||
path: /Users/distiller/project/artifacts.zip
|
||||
|
||||
mac_test:
|
||||
parameters:
|
||||
build-environment:
|
||||
type: string
|
||||
shard-number:
|
||||
type: string
|
||||
num-test-shards:
|
||||
type: string
|
||||
xcode-version:
|
||||
type: string
|
||||
test-config:
|
||||
type: string
|
||||
default: 'default'
|
||||
|
||||
macos:
|
||||
xcode: << parameters.xcode-version >>
|
||||
environment:
|
||||
GIT_DEFAULT_BRANCH: 'master'
|
||||
BUILD_ENVIRONMENT: << parameters.build-environment >>
|
||||
TEST_CONFIG: << parameters.test-config >>
|
||||
SHARD_NUMBER: << parameters.shard-number >>
|
||||
NUM_TEST_SHARDS: << parameters.num-test-shards >>
|
||||
PYTORCH_RETRY_TEST_CASES: 1
|
||||
PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/workspace
|
||||
- run_brew_for_macos_build
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "2h"
|
||||
command: |
|
||||
set -x
|
||||
|
||||
git submodule sync --recursive
|
||||
git submodule update --init --recursive
|
||||
|
||||
mv ~/workspace/artifacts.zip .
|
||||
unzip artifacts.zip
|
||||
|
||||
export IN_CI=1
|
||||
|
||||
COMMIT_MESSAGES=$(git cherry -v "origin/${GIT_DEFAULT_BRANCH:-master}")
|
||||
|
||||
export PATH="/usr/local/bin:$PATH"
|
||||
export WORKSPACE_DIR="${HOME}/workspace"
|
||||
mkdir -p "${WORKSPACE_DIR}"
|
||||
|
||||
export PATH="${WORKSPACE_DIR}/miniconda3/bin:$PATH"
|
||||
source "${WORKSPACE_DIR}"/miniconda3/bin/activate
|
||||
|
||||
# sanitize the input commit message and PR body here:
|
||||
|
||||
# trim all new lines from commit messages to avoid issues with batch environment
|
||||
# variable copying. see https://github.com/pytorch/pytorch/pull/80043#issuecomment-1167796028
|
||||
COMMIT_MESSAGES="${COMMIT_MESSAGES//[$'\n\r']}"
|
||||
|
||||
# then trim all special characters like single and double quotes to avoid unescaped inputs to
|
||||
# wreak havoc internally
|
||||
export COMMIT_MESSAGES="${COMMIT_MESSAGES//[\'\"]}"
|
||||
|
||||
python3 -mpip install dist/*.whl
|
||||
.jenkins/pytorch/macos-test.sh
|
||||
- run:
|
||||
name: Copy files for uploading test stats
|
||||
command: |
|
||||
# copy into a parent folder test-reports because we can't use CIRCLEI_BUILD_NUM in path when persisting to workspace
|
||||
mkdir -p test-reports/test-reports_${CIRCLE_BUILD_NUM}/test/test-reports
|
||||
cp -r test/test-reports test-reports/test-reports_${CIRCLE_BUILD_NUM}/test/test-reports
|
||||
- store_test_results:
|
||||
path: test/test-reports
|
||||
- persist_to_workspace:
|
||||
root: /Users/distiller/project/
|
||||
paths:
|
||||
- test-reports
|
||||
|
||||
upload_test_stats:
|
||||
machine: # executor type
|
||||
image: ubuntu-2004:202010-01 # # recommended linux image - includes Ubuntu 20.04, docker 19.03.13, docker-compose 1.27.4
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/workspace
|
||||
- run:
|
||||
name: upload
|
||||
command: |
|
||||
set -ex
|
||||
if [ -z ${AWS_ACCESS_KEY_FOR_OSSCI_ARTIFACT_UPLOAD} ]; then
|
||||
echo "No credentials found, cannot upload test stats (are you on a fork?)"
|
||||
exit 0
|
||||
fi
|
||||
cp -r ~/workspace/test-reports/* ~/project
|
||||
pip3 install requests==2.26 rockset==0.8.3 boto3==1.19.12 six==1.16.0
|
||||
export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_FOR_OSSCI_ARTIFACT_UPLOAD}
|
||||
export AWS_SECRET_ACCESS_KEY=${AWS_SECRET_KEY_FOR_OSSCI_ARTIFACT_UPLOAD}
|
||||
# i dont know how to get the run attempt number for reruns so default to 1
|
||||
python3 -m tools.stats.upload_test_stats --workflow-run-id "${CIRCLE_WORKFLOW_JOB_ID}" --workflow-run-attempt 1 --head-branch << pipeline.git.branch >> --circleci
|
||||
pytorch_macos_10_13_py3_test:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-test
|
||||
@ -302,6 +201,7 @@
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
export IN_CI=1
|
||||
export JOB_BASE_NAME=$CIRCLE_JOB
|
||||
|
||||
chmod a+x .jenkins/pytorch/macos-test.sh
|
||||
@ -314,6 +214,7 @@
|
||||
source /Users/distiller/workspace/miniconda3/bin/activate
|
||||
python3 -m pip install boto3==1.19.12
|
||||
|
||||
export IN_CI=1
|
||||
export JOB_BASE_NAME=$CIRCLE_JOB
|
||||
|
||||
# Using the same IAM user to write stats to our OSS bucket
|
||||
@ -339,6 +240,7 @@
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
export IN_CI=1
|
||||
export BUILD_LITE_INTERPRETER=1
|
||||
export JOB_BASE_NAME=$CIRCLE_JOB
|
||||
chmod a+x ${HOME}/project/.jenkins/pytorch/macos-lite-interpreter-build-test.sh
|
||||
@ -428,6 +330,9 @@
|
||||
output_image=$docker_image_libtorch_android_x86_32-gradle
|
||||
docker commit "$id_x86_32" ${output_image}
|
||||
time docker push ${output_image}
|
||||
- upload_binary_size_for_android_build:
|
||||
build_type: prebuilt
|
||||
artifacts: /home/circleci/workspace/build_android_artifacts/artifacts.tgz
|
||||
- store_artifacts:
|
||||
path: ~/workspace/build_android_artifacts/artifacts.tgz
|
||||
destination: artifacts.tgz
|
||||
@ -503,6 +408,9 @@
|
||||
output_image=${docker_image_libtorch_android_x86_32}-gradle
|
||||
docker commit "$id" ${output_image}
|
||||
time docker push ${output_image}
|
||||
- upload_binary_size_for_android_build:
|
||||
build_type: prebuilt-single
|
||||
artifacts: /home/circleci/workspace/build_android_x86_32_artifacts/artifacts.tgz
|
||||
- store_artifacts:
|
||||
path: ~/workspace/build_android_x86_32_artifacts/artifacts.tgz
|
||||
destination: artifacts.tgz
|
||||
@ -512,43 +420,10 @@
|
||||
macos:
|
||||
xcode: "12.5.1"
|
||||
steps:
|
||||
- run:
|
||||
name: checkout with retry
|
||||
command: |
|
||||
checkout() {
|
||||
set -ex
|
||||
# Workaround old docker images with incorrect $HOME
|
||||
# check https://github.com/docker/docker/issues/2968 for details
|
||||
if [ "${HOME}" = "/" ]
|
||||
then
|
||||
export HOME=$(getent passwd $(id -un) | cut -d: -f6)
|
||||
fi
|
||||
|
||||
mkdir -p ~/.ssh
|
||||
|
||||
echo 'github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
|
||||
' >> ~/.ssh/known_hosts
|
||||
|
||||
# use git+ssh instead of https
|
||||
git config --global url."ssh://git@github.com".insteadOf "https://github.com" || true
|
||||
git config --global gc.auto 0 || true
|
||||
|
||||
echo 'Cloning git repository'
|
||||
mkdir -p '/Users/distiller/project'
|
||||
cd '/Users/distiller/project'
|
||||
git clone "$CIRCLE_REPOSITORY_URL" .
|
||||
echo 'Checking out branch'
|
||||
git checkout --force -B "$CIRCLE_BRANCH" "$CIRCLE_SHA1"
|
||||
git --no-pager log --no-color -n 1 --format='HEAD is now at %h %s'
|
||||
}
|
||||
|
||||
retry () {
|
||||
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
|
||||
}
|
||||
retry checkout
|
||||
- checkout
|
||||
- run_brew_for_ios_build
|
||||
- run:
|
||||
name: Setup Fastlane
|
||||
name: Run Fastlane
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
@ -556,11 +431,26 @@
|
||||
cd ${PROJ_ROOT}/ios/TestApp
|
||||
# install fastlane
|
||||
sudo gem install bundler && bundle install
|
||||
# install certificates
|
||||
echo ${IOS_CERT_KEY_2022} >> cert.txt
|
||||
base64 --decode cert.txt -o Certificates.p12
|
||||
rm cert.txt
|
||||
bundle exec fastlane install_root_cert
|
||||
bundle exec fastlane install_dev_cert
|
||||
# install the provisioning profile
|
||||
PROFILE=PyTorch_CI_2022.mobileprovision
|
||||
PROVISIONING_PROFILES=~/Library/MobileDevice/Provisioning\ Profiles
|
||||
mkdir -pv "${PROVISIONING_PROFILES}"
|
||||
cd "${PROVISIONING_PROFILES}"
|
||||
echo ${IOS_SIGN_KEY_2022} >> cert.txt
|
||||
base64 --decode cert.txt -o ${PROFILE}
|
||||
rm cert.txt
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
export IN_CI=1
|
||||
WORKSPACE=/Users/distiller/workspace
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
export TCLLIBPATH="/usr/local/lib"
|
||||
@ -613,12 +503,18 @@
|
||||
command: |
|
||||
set -e
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
PROFILE=PyTorch_CI_2022
|
||||
# run the ruby build script
|
||||
if ! [ -x "$(command -v xcodebuild)" ]; then
|
||||
echo 'Error: xcodebuild is not installed.'
|
||||
exit 1
|
||||
fi
|
||||
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM}
|
||||
echo ${IOS_DEV_TEAM_ID}
|
||||
if [ ${IOS_PLATFORM} != "SIMULATOR" ]; then
|
||||
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM} -c ${PROFILE} -t ${IOS_DEV_TEAM_ID}
|
||||
else
|
||||
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM}
|
||||
fi
|
||||
if ! [ "$?" -eq "0" ]; then
|
||||
echo 'xcodebuild failed!'
|
||||
exit 1
|
||||
@ -641,13 +537,12 @@
|
||||
cd ${PROJ_ROOT}/ios/TestApp/benchmark
|
||||
mkdir -p ../models
|
||||
if [ ${USE_COREML_DELEGATE} == 1 ]; then
|
||||
pip install coremltools==5.0b5 protobuf==3.20.1 six==1.16.0
|
||||
pip install coremltools==5.0b5
|
||||
pip install six
|
||||
python coreml_backend.py
|
||||
else
|
||||
cd "${PROJ_ROOT}"
|
||||
python test/mobile/model_test/gen_test_model.py ios-test
|
||||
python trace_model.py
|
||||
fi
|
||||
cd "${PROJ_ROOT}/ios/TestApp/benchmark"
|
||||
if [ ${BUILD_LITE_INTERPRETER} == 1 ]; then
|
||||
echo "Setting up the TestApp for LiteInterpreter"
|
||||
ruby setup.rb --lite 1
|
||||
@ -655,10 +550,10 @@
|
||||
echo "Setting up the TestApp for Full JIT"
|
||||
ruby setup.rb
|
||||
fi
|
||||
cd "${PROJ_ROOT}/ios/TestApp"
|
||||
# instruments -s -devices
|
||||
if [ "${BUILD_LITE_INTERPRETER}" == 1 ]; then
|
||||
if [ "${USE_COREML_DELEGATE}" == 1 ]; then
|
||||
cd ${PROJ_ROOT}/ios/TestApp
|
||||
instruments -s -devices
|
||||
if [ ${BUILD_LITE_INTERPRETER} == 1 ]; then
|
||||
if [ ${USE_COREML_DELEGATE} == 1 ]; then
|
||||
fastlane scan --only_testing TestAppTests/TestAppTests/testCoreML
|
||||
else
|
||||
fastlane scan --only_testing TestAppTests/TestAppTests/testLiteInterpreter
|
||||
@ -685,7 +580,7 @@
|
||||
time docker pull ${DOCKER_IMAGE}:${DOCKER_TAG} >/dev/null
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}:${DOCKER_TAG})
|
||||
|
||||
echo "Do NOT merge main branch into $CIRCLE_BRANCH in environment $BUILD_ENVIRONMENT"
|
||||
echo "Do NOT merge master branch into $CIRCLE_BRANCH in environment $BUILD_ENVIRONMENT"
|
||||
|
||||
git submodule sync && git submodule update -q --init --recursive --depth 1 --jobs 0
|
||||
|
||||
@ -760,3 +655,27 @@
|
||||
set -e
|
||||
python3 -m pip install requests
|
||||
python3 ./.circleci/scripts/trigger_azure_pipeline.py
|
||||
|
||||
pytorch_doc_test:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-doc-test
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.7-gcc5.4"
|
||||
resource_class: medium
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
name: Doc test
|
||||
no_output_timeout: "30m"
|
||||
command: |
|
||||
set -ex
|
||||
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}:build-${DOCKER_TAG}-${CIRCLE_SHA1}
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && . ./.jenkins/pytorch/docs-test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
229
.circleci/verbatim-sources/job-specs/pytorch-job-specs.yml
Normal file
229
.circleci/verbatim-sources/job-specs/pytorch-job-specs.yml
Normal file
@ -0,0 +1,229 @@
|
||||
jobs:
|
||||
pytorch_linux_build:
|
||||
<<: *pytorch_params
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_linux_system_environment
|
||||
- optional_merge_target_branch
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"pure_torch"* ]]; then
|
||||
echo 'BUILD_CAFFE2=OFF' >> "${BASH_ENV}"
|
||||
fi
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
echo 'ATEN_THREADING=TBB' >> "${BASH_ENV}"
|
||||
echo 'USE_TBB=1' >> "${BASH_ENV}"
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
echo 'ATEN_THREADING=NATIVE' >> "${BASH_ENV}"
|
||||
fi
|
||||
echo "Parallel backend flags: "${PARALLEL_FLAGS}
|
||||
# Pull Docker image and run build
|
||||
echo "DOCKER_IMAGE: "${DOCKER_IMAGE}:${DOCKER_TAG}
|
||||
time docker pull ${DOCKER_IMAGE}:${DOCKER_TAG} >/dev/null
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}:${DOCKER_TAG})
|
||||
|
||||
git submodule sync && git submodule update -q --init --recursive --depth 1 --jobs 0
|
||||
|
||||
docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace
|
||||
|
||||
export COMMAND='((echo "sudo chown -R jenkins workspace && export JOB_BASE_NAME="$CIRCLE_JOB" && cd workspace && .jenkins/pytorch/build.sh && find ${BUILD_ROOT} -type f -name "*.a" -or -name "*.o" -delete") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
# Copy dist folder back
|
||||
docker cp $id:/var/lib/jenkins/workspace/dist /home/circleci/project/. || echo "Dist folder not found"
|
||||
|
||||
# Push intermediate Docker image for next phase to use
|
||||
if [ -z "${BUILD_ONLY}" ]; then
|
||||
# Note [Special build images]
|
||||
# The xla build uses the same docker image as
|
||||
# pytorch_linux_bionic_py3_6_clang9_build. In the push step, we have to
|
||||
# distinguish between them so the test can pick up the correct image.
|
||||
output_image=${DOCKER_IMAGE}:build-${DOCKER_TAG}-${CIRCLE_SHA1}
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-xla
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"libtorch"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-libtorch
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-paralleltbb
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-parallelnative
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-x86_64"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-x86_64
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-arm-v7a"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-arm-v7a
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-arm-v8a"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-arm-v8a
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-x86_32"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-x86_32
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-vulkan-x86_32"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-vulkan-x86_32
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"vulkan-linux"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-vulkan
|
||||
else
|
||||
export COMMIT_DOCKER_IMAGE=$output_image
|
||||
fi
|
||||
docker commit "$id" ${COMMIT_DOCKER_IMAGE}
|
||||
time docker push ${COMMIT_DOCKER_IMAGE}
|
||||
fi
|
||||
- run:
|
||||
name: upload build & binary data
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
cd /pytorch && export COMMIT_TIME=$(git log --max-count=1 --format=%ct || echo 0)
|
||||
python3 -mpip install requests && \
|
||||
SCRIBE_GRAPHQL_ACCESS_TOKEN=${SCRIBE_GRAPHQL_ACCESS_TOKEN} \
|
||||
python3 -m tools.stats.upload_binary_size_to_scuba || exit 0
|
||||
- store_artifacts:
|
||||
path: /home/circleci/project/dist
|
||||
|
||||
pytorch_linux_test:
|
||||
<<: *pytorch_params
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
name: Download Docker image
|
||||
no_output_timeout: "90m"
|
||||
command: |
|
||||
set -e
|
||||
export PYTHONUNBUFFERED=1
|
||||
if [[ "${DOCKER_IMAGE}" == *rocm3.9* ]]; then
|
||||
export DOCKER_TAG="f3d89a32912f62815e4feaeed47e564e887dffd6"
|
||||
fi
|
||||
# See Note [Special build images]
|
||||
output_image=${DOCKER_IMAGE}:build-${DOCKER_TAG}-${CIRCLE_SHA1}
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-xla
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"libtorch"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-libtorch
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-paralleltbb
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-parallelnative
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"vulkan-linux"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-vulkan
|
||||
else
|
||||
export COMMIT_DOCKER_IMAGE=$output_image
|
||||
fi
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
echo 'ATEN_THREADING=TBB' >> "${BASH_ENV}"
|
||||
echo 'USE_TBB=1' >> "${BASH_ENV}"
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
echo 'ATEN_THREADING=NATIVE' >> "${BASH_ENV}"
|
||||
fi
|
||||
echo "Parallel backend flags: "${PARALLEL_FLAGS}
|
||||
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
|
||||
# TODO: Make this less painful
|
||||
if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --gpus all --shm-size=2g -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"rocm"* ]]; then
|
||||
hostname
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --shm-size=8g --ipc=host --device /dev/kfd --device /dev/dri --group-add video -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
else
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --shm-size=1g --ipc=host -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
fi
|
||||
echo "id=${id}" >> "${BASH_ENV}"
|
||||
|
||||
- run:
|
||||
name: Check for no AVX instruction by default
|
||||
no_output_timeout: "20m"
|
||||
command: |
|
||||
set -e
|
||||
is_vanilla_build() {
|
||||
if [ "${BUILD_ENVIRONMENT}" == "pytorch-linux-bionic-py3.7-clang9-test" ]; then
|
||||
return 0
|
||||
fi
|
||||
if [ "${BUILD_ENVIRONMENT}" == "pytorch-linux-xenial-py3.7-gcc5.4-test" ]; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
if is_vanilla_build; then
|
||||
echo "apt-get update || apt-get install libgnutls30" | docker exec -u root -i "$id" bash
|
||||
echo "apt-get install -y qemu-user gdb" | docker exec -u root -i "$id" bash
|
||||
echo "cd workspace/build; qemu-x86_64 -g 2345 -cpu Broadwell -E ATEN_CPU_CAPABILITY=default ./bin/basic --gtest_filter=BasicTest.BasicTestCPU & gdb ./bin/basic -ex 'set pagination off' -ex 'target remote :2345' -ex 'continue' -ex 'bt' -ex='set confirm off' -ex 'quit \$_isvoid(\$_exitcode)'" | docker exec -u jenkins -i "$id" bash
|
||||
else
|
||||
echo "Skipping for ${BUILD_ENVIRONMENT}"
|
||||
fi
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "90m"
|
||||
command: |
|
||||
set -e
|
||||
|
||||
cat >docker_commands.sh \<<EOL
|
||||
# =================== The following code will be executed inside Docker container ===================
|
||||
set -ex
|
||||
export SCRIBE_GRAPHQL_ACCESS_TOKEN="${SCRIBE_GRAPHQL_ACCESS_TOKEN}"
|
||||
export JOB_BASE_NAME="$CIRCLE_JOB"
|
||||
# temporary fix for https://github.com/pytorch/pytorch/issues/60746
|
||||
if [ -z "$CIRCLE_PR_NUMBER" ]; then
|
||||
if [[ $CIRCLE_BRANCH =~ .*pull.* ]]; then
|
||||
export PR_NUMBER="$(echo $CIRCLE_BRANCH | sed 's/[^0-9]//g')"
|
||||
export CIRCLE_PR_NUMBER="$PR_NUMBER"
|
||||
fi
|
||||
else
|
||||
export PR_NUMBER="$CIRCLE_PR_NUMBER"
|
||||
fi
|
||||
${PARALLEL_FLAGS}
|
||||
cd workspace
|
||||
EOL
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"multigpu"* ]]; then
|
||||
echo ".jenkins/pytorch/multigpu-test.sh" >> docker_commands.sh
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *onnx* ]]; then
|
||||
echo ".jenkins/caffe2/test.sh" >> docker_commands.sh
|
||||
else
|
||||
echo ".jenkins/pytorch/test.sh" >> docker_commands.sh
|
||||
fi
|
||||
echo "(cat docker_commands.sh | docker exec -u jenkins -i "$id" bash) 2>&1" > command.sh
|
||||
unbuffer bash command.sh | ts
|
||||
|
||||
- run:
|
||||
name: Report results
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
set -e
|
||||
# Retrieving test results should be done as very first step as command never fails
|
||||
# But is always executed if previous step fails for some reason
|
||||
echo "Retrieving test reports"
|
||||
docker cp $id:/var/lib/jenkins/workspace/test/test-reports ./ || echo 'No test reports found!'
|
||||
docker stats --all --no-stream
|
||||
|
||||
cat >docker_commands.sh \<<EOL
|
||||
# =================== The following code will be executed inside Docker container ===================
|
||||
set -ex
|
||||
export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}
|
||||
export SCRIBE_GRAPHQL_ACCESS_TOKEN="${SCRIBE_GRAPHQL_ACCESS_TOKEN}"
|
||||
export CIRCLE_TAG="${CIRCLE_TAG:-}"
|
||||
export CIRCLE_SHA1="$CIRCLE_SHA1"
|
||||
export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}"
|
||||
export CIRCLE_BRANCH="$CIRCLE_BRANCH"
|
||||
export JOB_BASE_NAME="$CIRCLE_JOB"
|
||||
export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
|
||||
cd workspace
|
||||
python -m tools.stats.print_test_stats --upload-to-s3 --compare-with-s3 test
|
||||
EOL
|
||||
echo "(cat docker_commands.sh | docker exec -u jenkins -e LANG=C.UTF-8 -i "$id" bash) 2>&1" > command.sh
|
||||
unbuffer bash command.sh | ts
|
||||
when: always
|
||||
- store_test_results:
|
||||
path: test-reports
|
||||
46
.circleci/verbatim-sources/workflows/workflows-promote.yml
Normal file
46
.circleci/verbatim-sources/workflows/workflows-promote.yml
Normal file
@ -0,0 +1,46 @@
|
||||
# Promotion workflow
|
||||
promote:
|
||||
jobs:
|
||||
# Requires manual approval by someone in org-member
|
||||
# CircleCI security context
|
||||
- promote_approval:
|
||||
context: org-member
|
||||
filters:
|
||||
branches:
|
||||
ignore: /.*/
|
||||
tags:
|
||||
only: /v[0-9]+(\.[0-9]+)*/
|
||||
type: approval
|
||||
- promote_s3:
|
||||
context: org-member
|
||||
filters:
|
||||
branches:
|
||||
ignore: /.*/
|
||||
tags:
|
||||
only: /v[0-9]+(\.[0-9]+)*/
|
||||
name: promote_s3_libtorch
|
||||
package_name: libtorch
|
||||
requires:
|
||||
- promote_approval
|
||||
- promote_s3:
|
||||
context: org-member
|
||||
filters:
|
||||
branches:
|
||||
ignore: /.*/
|
||||
tags:
|
||||
only: /v[0-9]+(\.[0-9]+)*/
|
||||
name: promote_s3_torch
|
||||
package_name: torch
|
||||
requires:
|
||||
- promote_approval
|
||||
- promote_conda:
|
||||
context: org-member
|
||||
filters:
|
||||
branches:
|
||||
ignore: /.*/
|
||||
tags:
|
||||
only: /v[0-9]+(\.[0-9]+)*/
|
||||
name: promote_conda_pytorch
|
||||
package_name: pytorch
|
||||
requires:
|
||||
- promote_approval
|
||||
3
.flake8
3
.flake8
@ -22,9 +22,6 @@ exclude =
|
||||
./docs/caffe2,
|
||||
./docs/cpp/src,
|
||||
./docs/src,
|
||||
./functorch/docs,
|
||||
./functorch/examples,
|
||||
./functorch/notebooks,
|
||||
./scripts,
|
||||
./test/generated_type_hints_smoketest.py,
|
||||
./third_party,
|
||||
|
||||
@ -1,30 +0,0 @@
|
||||
# 2020-11-12 Enabled ShellCheck on `.jenkins/pytorch`
|
||||
65d5004b09fd8d5deac173a3aaa259f46eaa0d67
|
||||
# 2021-01-20 Replaced ` ` with `...` in many doctests
|
||||
c147aa306c6386a753fdff24b48d04e803070a63
|
||||
# 2021-03-05 Removed all trailing whitespace
|
||||
8c798e062216278673a75bac0848ea69a8bd3f03
|
||||
# 2021-03-30 Normalized trailing newlines
|
||||
5bcbbf537327f6e8328289c25a3a453a2444d984
|
||||
# 2021-03-31 Autogenerated Markdown ToCs
|
||||
a74b10def961ab090385f291ee06e66db99c1a2f
|
||||
# 2021-04-02 Enabled more ShellCheck warnings
|
||||
09670c7d43b9abce862a6bf71d8cc89e64764bdb
|
||||
# 2021-04-08 Removed all non-breaking spaces
|
||||
cc11aaaa60aadf28e3ec278bce26a42c1cd68a4f
|
||||
# 2021-04-13 Expanded many wildcard imports
|
||||
4753100a3baa96273204c361c8452afb7b59836f
|
||||
# 2021-04-19 Removed all unqualified `noqa`
|
||||
e3900d2ba5c9f91a24a9ce34520794c8366d5c54
|
||||
# 2021-04-21 Removed all unqualified `type: ignore`
|
||||
75024e228ca441290b6a1c2e564300ad507d7af6
|
||||
# 2021-04-30 [PyTorch] Autoformat c10
|
||||
44cc873fba5e5ffc4d4d4eef3bd370b653ce1ce1
|
||||
# 2021-05-14 Removed all versionless Python shebangs
|
||||
2e26976ad3b06ce95dd6afccfdbe124802edf28f
|
||||
# 2021-06-07 Strictly typed everything in `.github` and `tools`
|
||||
737d920b21db9b4292d056ee1329945990656304
|
||||
# 2022-06-09 Apply clang-format to ATen headers
|
||||
95b15c266baaf989ef7b6bbd7c23a2d90bacf687
|
||||
# 2022-06-11 [lint] autoformat test/cpp and torch/csrc
|
||||
30fb2c4abaaaa966999eab11674f25b18460e609
|
||||
2
.gitattributes
vendored
2
.gitattributes
vendored
@ -2,5 +2,3 @@
|
||||
.circleci/config.yml linguist-generated=true
|
||||
.github/workflows/generated-*.yml linguist-generated=true
|
||||
.github/generated-* linguist-generated=true
|
||||
.github/scripts/gql_mocks.json linguist-generated=true
|
||||
third_party/LICENSES_BUNDLED.txt linguist-generated=true
|
||||
|
||||
4
.github/ISSUE_TEMPLATE/ci-sev.md
vendored
4
.github/ISSUE_TEMPLATE/ci-sev.md
vendored
@ -1,12 +1,10 @@
|
||||
---
|
||||
name: "⚠️ CI SEV"
|
||||
name: "⚠️CI SEV"
|
||||
about: Tracking incidents for PyTorch's CI infra.
|
||||
---
|
||||
|
||||
> NOTE: Remember to label this issue with "`ci: sev`"
|
||||
|
||||
**MERGE BLOCKING** <!-- remove this line if you don't want this SEV to block merges -->
|
||||
|
||||
## Current Status
|
||||
*Status could be: preemptive, ongoing, mitigated, closed. Also tell people if they need to take action to fix it (i.e. rebase)*.
|
||||
|
||||
|
||||
2
.github/ISSUE_TEMPLATE/config.yml
vendored
2
.github/ISSUE_TEMPLATE/config.yml
vendored
@ -2,4 +2,4 @@ blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: Questions
|
||||
url: https://discuss.pytorch.org/
|
||||
about: Ask questions and discuss with other PyTorch community members
|
||||
about: Ask questions and discuss with other pytorch community members
|
||||
|
||||
2
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
2
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
@ -1,5 +1,5 @@
|
||||
name: 🚀 Feature request
|
||||
description: Submit a proposal/request for a new PyTorch feature
|
||||
description: Submit a proposal/request for a new pytorch feature
|
||||
|
||||
body:
|
||||
- type: textarea
|
||||
|
||||
7
.github/actionlint.yaml
vendored
7
.github/actionlint.yaml
vendored
@ -1,7 +1,5 @@
|
||||
self-hosted-runner:
|
||||
labels:
|
||||
- linux.20_04.4x
|
||||
- linux.20_04.16x
|
||||
- linux.large
|
||||
- linux.2xlarge
|
||||
- linux.4xlarge
|
||||
@ -11,8 +9,3 @@ self-hosted-runner:
|
||||
- windows.4xlarge
|
||||
- windows.8xlarge.nvidia.gpu
|
||||
- bm-runner
|
||||
- linux.rocm.gpu
|
||||
- macos-m1-12
|
||||
- macos-12-xl
|
||||
- macos-12
|
||||
- macos12.3-m1
|
||||
|
||||
76
.github/actions/build-android/action.yml
vendored
76
.github/actions/build-android/action.yml
vendored
@ -1,76 +0,0 @@
|
||||
name: build android
|
||||
|
||||
description: build android for a specific arch
|
||||
|
||||
inputs:
|
||||
arch:
|
||||
description: arch to build
|
||||
required: true
|
||||
arch-for-build-env:
|
||||
description: |
|
||||
arch to pass to build environment.
|
||||
This is currently different than the arch name we use elswhere, which
|
||||
should be fixed.
|
||||
required: true
|
||||
github-secret:
|
||||
description: github token
|
||||
required: true
|
||||
build-environment:
|
||||
required: true
|
||||
description: Top-level label for what's being built/tested.
|
||||
docker-image:
|
||||
required: true
|
||||
description: Name of the base docker image to build with.
|
||||
branch:
|
||||
required: true
|
||||
description: What branch we are building on.
|
||||
outputs:
|
||||
container_id:
|
||||
description: Docker container identifier used to build the artifacts
|
||||
value: ${{ steps.build.outputs.container_id }}
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Build-${{ inputs.arch }}
|
||||
id: build
|
||||
shell: bash
|
||||
env:
|
||||
BRANCH: ${{ inputs.branch }}
|
||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-${{ inputs.arch-for-build-env }}-build"
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
|
||||
DOCKER_IMAGE: ${{ inputs.docker-image }}
|
||||
MATRIX_ARCH: ${{ inputs.arch }}
|
||||
run: |
|
||||
# detached container should get cleaned up by teardown_ec2_linux
|
||||
set -exo pipefail
|
||||
export container_name
|
||||
container_name=$(docker run \
|
||||
-e BUILD_ENVIRONMENT \
|
||||
-e MAX_JOBS="$(nproc --ignore=2)" \
|
||||
-e AWS_DEFAULT_REGION \
|
||||
-e PR_NUMBER \
|
||||
-e SHA1 \
|
||||
-e BRANCH \
|
||||
-e SCCACHE_BUCKET \
|
||||
-e SKIP_SCCACHE_INITIALIZATION=1 \
|
||||
--env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
|
||||
--security-opt seccomp=unconfined \
|
||||
--cap-add=SYS_PTRACE \
|
||||
--tty \
|
||||
--detach \
|
||||
--user jenkins \
|
||||
-w /var/lib/jenkins/workspace \
|
||||
"${DOCKER_IMAGE}"
|
||||
)
|
||||
git submodule sync && git submodule update -q --init --recursive --depth 1 --jobs 0
|
||||
docker cp "${GITHUB_WORKSPACE}/." "${container_name}:/var/lib/jenkins/workspace"
|
||||
(echo "sudo chown -R jenkins . && .jenkins/pytorch/build.sh && find ${BUILD_ROOT} -type f -name "*.a" -or -name "*.o" -delete" | docker exec -u jenkins -i "${container_name}" bash) 2>&1
|
||||
|
||||
# Copy install binaries back
|
||||
mkdir -p "${GITHUB_WORKSPACE}/build_android_install_${MATRIX_ARCH}"
|
||||
docker cp "${container_name}:/var/lib/jenkins/workspace/build_android/install" "${GITHUB_WORKSPACE}/build_android_install_${MATRIX_ARCH}"
|
||||
echo "::set-output name=container_id::${container_name}"
|
||||
114
.github/actions/calculate-docker-image/action.yml
vendored
114
.github/actions/calculate-docker-image/action.yml
vendored
@ -1,114 +0,0 @@
|
||||
name: Calculate docker image
|
||||
|
||||
description: Determine docker image to pull, building a new one if necessary.
|
||||
|
||||
inputs:
|
||||
docker-image-name:
|
||||
description: The name of a docker image, like `pytorch-linux-xenial-py3.7-gcc7`
|
||||
required: true
|
||||
xla:
|
||||
description: |
|
||||
Whether or not to use a pre-build XLA docker image.
|
||||
Note that this is a string, either "true" or "false" due to GHA limitations.
|
||||
required: false
|
||||
always-rebuild:
|
||||
description: If set to any value, always build a fresh docker image.
|
||||
required: false
|
||||
pull:
|
||||
description: If set to any value, run `docker pull`` on the calculated image.
|
||||
required: false
|
||||
skip_push:
|
||||
description: If set to true value, skip will be pushed, default is to skip so that pushing will be explicit
|
||||
required: false
|
||||
default: "true"
|
||||
force_push:
|
||||
description: If set to any value, always run the push
|
||||
required: false
|
||||
push-ghcr-image:
|
||||
description: If set to any value, push docker image to the ghcr.io.
|
||||
required: false
|
||||
|
||||
outputs:
|
||||
docker-image:
|
||||
description: The docker image to use for the rest of the workflow
|
||||
value: ${{ steps.calculate-tag.outputs.docker-image }}
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Calculate docker image tag
|
||||
shell: bash
|
||||
id: calculate-tag
|
||||
env:
|
||||
IS_XLA: ${{ inputs.xla == 'true' && 'true' || '' }}
|
||||
XLA_IMAGE_TAG: v0.4
|
||||
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/${{ inputs.docker-image-name }}
|
||||
run: |
|
||||
if [ -n "${IS_XLA}" ]; then
|
||||
echo "XLA workflow uses pre-built test image at ${XLA_IMAGE_TAG}"
|
||||
DOCKER_TAG=$(git rev-parse HEAD:.circleci/docker)
|
||||
echo "::set-output name=docker-tag::${DOCKER_TAG}"
|
||||
echo "::set-output name=docker-image::${DOCKER_IMAGE_BASE}:${XLA_IMAGE_TAG}"
|
||||
else
|
||||
DOCKER_TAG=$(git rev-parse HEAD:.circleci/docker)
|
||||
echo "::set-output name=docker-tag::${DOCKER_TAG}"
|
||||
echo "::set-output name=docker-image::${DOCKER_IMAGE_BASE}:${DOCKER_TAG}"
|
||||
fi
|
||||
|
||||
- name: Check if image should be built
|
||||
shell: bash
|
||||
id: check
|
||||
if: ${{ !inputs.always-rebuild }}
|
||||
env:
|
||||
BASE_REVISION: ${{ github.event.pull_request.base.sha || github.sha }}
|
||||
DOCKER_IMAGE: ${{ steps.calculate-tag.outputs.docker-image }}
|
||||
DOCKER_TAG: ${{ steps.calculate-tag.outputs.docker-tag }}
|
||||
DOCKER_FORCE_PUSH: ${{ inputs.force_push }}
|
||||
run: |
|
||||
set -x
|
||||
# Check if image already exists, if it does then skip building it
|
||||
if docker manifest inspect "${DOCKER_IMAGE}"; then
|
||||
exit 0
|
||||
fi
|
||||
if [[ "$BASE_REVISION" = "$(git rev-parse HEAD)" ]]; then
|
||||
# if we're on the base branch then use the parent commit
|
||||
MERGE_BASE=$(git rev-parse HEAD~)
|
||||
else
|
||||
# otherwise we're on a PR, so use the most recent base commit
|
||||
MERGE_BASE=$(git merge-base HEAD "$BASE_REVISION")
|
||||
fi
|
||||
# Covers the case where a previous tag doesn't exist for the tree
|
||||
# this is only really applicable on trees that don't have `.circleci/docker` at its merge base, i.e. nightly
|
||||
if ! git rev-parse "$MERGE_BASE:.circleci/docker"; then
|
||||
echo "Directory '.circleci/docker' not found in commit $MERGE_BASE, you should probably rebase onto a more recent commit"
|
||||
exit 1
|
||||
fi
|
||||
PREVIOUS_DOCKER_TAG=$(git rev-parse "$MERGE_BASE:.circleci/docker")
|
||||
# If no image exists but the hash is the same as the previous hash then we should error out here
|
||||
if [[ "${PREVIOUS_DOCKER_TAG}" = "${DOCKER_TAG}" ]]; then
|
||||
echo "WARNING: Something has gone wrong and the previous image isn't available for the merge-base of your branch"
|
||||
echo " Will re-build docker image to store in local cache, TTS may be longer"
|
||||
# NOTE: DOCKER_FORCE_PUSH will always be set to true for docker-builds.yml
|
||||
if [[ "${DOCKER_FORCE_PUSH}" != "true" ]]; then
|
||||
# In order to avoid a stampeding herd of jobs trying to push all at once we set it to
|
||||
# skip the push. If this is negatively affecting TTS across the board the suggestion
|
||||
# should be to run the docker-builds.yml workflow to generate the correct docker builds
|
||||
echo ::set-output name=skip_push::true
|
||||
fi
|
||||
fi
|
||||
echo ::set-output name=rebuild::yes
|
||||
|
||||
- name: Build and push docker image
|
||||
if: inputs.always-rebuild || steps.check.outputs.rebuild
|
||||
env:
|
||||
IMAGE_NAME: ${{inputs.docker-image-name}}
|
||||
DOCKER_SKIP_S3_UPLOAD: "1"
|
||||
# Skip push if we don't need it, or if specified in the inputs
|
||||
DOCKER_SKIP_PUSH: ${{ steps.check.outputs.skip_push || inputs.skip_push }}
|
||||
DOCKER_TAG: ${{ steps.calculate-tag.outputs.docker-tag }}
|
||||
PUSH_GHCR_IMAGE: ${{ inputs.push-ghcr-image }}
|
||||
GHCR_PAT: ${{ env.GHCR_PAT }}
|
||||
working-directory: .circleci/docker
|
||||
shell: bash
|
||||
run: |
|
||||
./build_docker.sh
|
||||
44
.github/actions/checkout-pytorch/action.yml
vendored
44
.github/actions/checkout-pytorch/action.yml
vendored
@ -1,44 +0,0 @@
|
||||
name: Checkout PyTorch
|
||||
|
||||
description: Clean workspace and check out PyTorch
|
||||
|
||||
inputs:
|
||||
no-sudo:
|
||||
description: If set to any value, don't use sudo to clean the workspace
|
||||
required: false
|
||||
submodules:
|
||||
description: Works as stated in actions/checkout, but the default value is recursive
|
||||
required: false
|
||||
default: recursive
|
||||
fetch-depth:
|
||||
description: Works as stated in actions/checkout, but the default value is 0
|
||||
required: false
|
||||
default: "0"
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Clean workspace
|
||||
shell: bash
|
||||
env:
|
||||
NO_SUDO: ${{ inputs.no-sudo }}
|
||||
run: |
|
||||
retry () {
|
||||
$* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
|
||||
}
|
||||
echo "${GITHUB_WORKSPACE}"
|
||||
if [ -z "${NO_SUDO}" ]; then
|
||||
retry sudo rm -rf "${GITHUB_WORKSPACE}"
|
||||
else
|
||||
retry rm -rf "${GITHUB_WORKSPACE}"
|
||||
fi
|
||||
mkdir "${GITHUB_WORKSPACE}"
|
||||
|
||||
- name: Checkout PyTorch
|
||||
uses: malfet/checkout@silent-checkout
|
||||
with:
|
||||
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
|
||||
# --depth=1 for speed, manually fetch history and other refs as necessary
|
||||
fetch-depth: ${{ inputs.fetch-depth }}
|
||||
submodules: ${{ inputs.submodules }}
|
||||
quiet-checkout: true
|
||||
11
.github/actions/chown-workspace/action.yml
vendored
11
.github/actions/chown-workspace/action.yml
vendored
@ -1,11 +0,0 @@
|
||||
name: Chown workspace
|
||||
|
||||
description: Ensure that the working directory gets chowned back to the current user
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- run: docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
||||
shell: bash
|
||||
env:
|
||||
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
|
||||
@ -1,34 +0,0 @@
|
||||
name: Download PyTorch Build Artifacts
|
||||
|
||||
description: Download and unzip artifacts from a previous PyTorch build.
|
||||
|
||||
inputs:
|
||||
name:
|
||||
description: Name of what artifact to download
|
||||
required: true
|
||||
use-gha:
|
||||
description: If set to any value, use GHA to download the artifact. Otherwise use s3.
|
||||
required: false
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Download PyTorch Build Artifacts from S3
|
||||
if: ${{ !inputs.use-gha }}
|
||||
uses: seemethere/download-artifact-s3@v4
|
||||
with:
|
||||
name: ${{ inputs.name }}
|
||||
|
||||
- name: Download PyTorch Build Artifacts from GHA
|
||||
if: inputs.use-gha
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: ${{ inputs.name }}
|
||||
|
||||
- name: Unzip artifacts
|
||||
shell: bash
|
||||
run: unzip -o artifacts.zip
|
||||
|
||||
- name: Output disk space left
|
||||
shell: bash
|
||||
run: df -H
|
||||
60
.github/actions/filter-test-configs/action.yml
vendored
60
.github/actions/filter-test-configs/action.yml
vendored
@ -1,60 +0,0 @@
|
||||
name: Filter test configs matrix
|
||||
|
||||
description: |
|
||||
Apply filter to the test configs matrix to keep only entries specified
|
||||
by the PR test-config labels. If no test-config label is set, the same
|
||||
test configs matrix is returned untouched.
|
||||
|
||||
inputs:
|
||||
github-token:
|
||||
description: GITHUB_TOKEN
|
||||
required: true
|
||||
test-matrix:
|
||||
required: true
|
||||
type: string
|
||||
description: JSON description of what test configs to run.
|
||||
|
||||
outputs:
|
||||
test-matrix:
|
||||
description: The filtered test configs matrix.
|
||||
value: ${{ steps.filter.outputs.test-matrix }}
|
||||
is-test-matrix-empty:
|
||||
description: True if the filtered test configs matrix is empty. False otherwise.
|
||||
value: ${{ steps.filter.outputs.is-test-matrix-empty }}
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: nick-fields/retry@71062288b76e2b6214ebde0e673ce0de1755740a
|
||||
name: Setup dependencies
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ inputs.github-token }}
|
||||
with:
|
||||
shell: bash
|
||||
timeout_minutes: 10
|
||||
max_attempts: 5
|
||||
retry_wait_seconds: 30
|
||||
command: |
|
||||
set -eux
|
||||
python3 -m pip install requests==2.26.0 pyyaml==6.0
|
||||
|
||||
- name: Parse ref
|
||||
shell: bash
|
||||
id: parse-ref
|
||||
run: .github/scripts/parse_ref.py
|
||||
|
||||
- name: Select all requested test configurations
|
||||
shell: bash
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ inputs.github-token }}
|
||||
id: filter
|
||||
run: |
|
||||
.github/scripts/filter_test_configs.py \
|
||||
--test-matrix "${{ inputs.test-matrix }}" \
|
||||
--pr-number "${{ github.event.pull_request.number }}" \
|
||||
--tag "${{ steps.parse-ref.outputs.tag }}"
|
||||
|
||||
- name: Print the filtered test matrix
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ steps.filter.outputs.test-matrix }}"
|
||||
31
.github/actions/get-workflow-job-id/action.yml
vendored
31
.github/actions/get-workflow-job-id/action.yml
vendored
@ -1,31 +0,0 @@
|
||||
name: Get workflow job id
|
||||
|
||||
description: Get the ID of the workflow job that is currently running.
|
||||
|
||||
inputs:
|
||||
github-token:
|
||||
description: GITHUB_TOKEN
|
||||
required: true
|
||||
|
||||
outputs:
|
||||
job-id:
|
||||
description: The retrieved workflow job id
|
||||
value: ${{ steps.get-job-id.outputs.job-id }}
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- uses: nick-fields/retry@7d4a37704547a311dbb66ebdf5b23ec19374a767
|
||||
id: get-job-id
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ inputs.github-token }}
|
||||
with:
|
||||
shell: bash
|
||||
timeout_minutes: 10
|
||||
max_attempts: 5
|
||||
retry_wait_seconds: 30
|
||||
command: |
|
||||
set -eux
|
||||
python3 -m pip install requests==2.26.0
|
||||
GHA_WORKFLOW_JOB_ID=$(python3 .github/scripts/get_workflow_job_id.py "${GITHUB_RUN_ID}" "${RUNNER_NAME}")
|
||||
echo "::set-output name=job-id::${GHA_WORKFLOW_JOB_ID}"
|
||||
48
.github/actions/setup-linux/action.yml
vendored
48
.github/actions/setup-linux/action.yml
vendored
@ -1,48 +0,0 @@
|
||||
name: Setup Linux
|
||||
|
||||
description: Set up Docker workspace on EC2
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
echo "system info $(uname -a)"
|
||||
|
||||
- name: Start docker if docker deamon is not running
|
||||
shell: bash
|
||||
run: |
|
||||
if systemctl is-active --quiet docker; then
|
||||
echo "Docker daemon is running...";
|
||||
else
|
||||
echo "Starting docker deamon..." && sudo systemctl start docker;
|
||||
fi
|
||||
|
||||
- name: Log in to ECR
|
||||
shell: bash
|
||||
env:
|
||||
AWS_RETRY_MODE: standard
|
||||
AWS_MAX_ATTEMPTS: "5"
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
run: |
|
||||
AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
retry () { "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") }
|
||||
retry aws ecr get-login-password --region "$AWS_DEFAULT_REGION" | docker login --username AWS \
|
||||
--password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com"
|
||||
|
||||
- name: Preserve github env variables for use in docker
|
||||
shell: bash
|
||||
run: |
|
||||
env | grep '^GITHUB' >> "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
65
.github/actions/setup-rocm/action.yml
vendored
65
.github/actions/setup-rocm/action.yml
vendored
@ -1,65 +0,0 @@
|
||||
name: Setup ROCm host
|
||||
|
||||
description: Set up ROCm host for CI
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Set DOCKER_HOST
|
||||
shell: bash
|
||||
run: echo "DOCKER_HOST=unix:///run/user/$(id -u)/docker.sock" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Runner health check system info
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
cat /etc/os-release || true
|
||||
cat /etc/apt/sources.list.d/rocm.list || true
|
||||
cat /opt/rocm/.info/version || true
|
||||
whoami
|
||||
|
||||
- name: Runner health check rocm-smi
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
rocm-smi
|
||||
|
||||
- name: Runner health check rocminfo
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
rocminfo
|
||||
|
||||
- name: Runner health check GPU count
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
ngpu=$(rocminfo | grep -c -E 'Name:.*\sgfx')
|
||||
if [[ "x$ngpu" != "x2" && "x$ngpu" != "x4" ]]; then
|
||||
echo "Failed to detect GPUs on the runner"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Runner health check disconnect on failure
|
||||
if: ${{ failure() }}
|
||||
shell: bash
|
||||
run: |
|
||||
killall runsvc.sh
|
||||
|
||||
- name: Preserve github env variables for use in docker
|
||||
shell: bash
|
||||
run: |
|
||||
env | grep '^GITHUB' >> "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"
|
||||
|
||||
- name: ROCm set GPU_FLAG
|
||||
shell: bash
|
||||
run: |
|
||||
# Examine the runner name. If it ends with "-2", this is the second runner on the host.
|
||||
if [[ ${{ runner.name }} == *-2 ]]; then
|
||||
# select the last two GPUs on the host
|
||||
echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri/renderD130 --device=/dev/dri/renderD131 --group-add video --group-add daemon" >> "${GITHUB_ENV}"
|
||||
else
|
||||
# select the first two GPUs on the host
|
||||
echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri/renderD128 --device=/dev/dri/renderD129 --group-add video --group-add daemon" >> "${GITHUB_ENV}"
|
||||
fi
|
||||
65
.github/actions/setup-win/action.yml
vendored
65
.github/actions/setup-win/action.yml
vendored
@ -1,65 +0,0 @@
|
||||
name: Setup Windows
|
||||
|
||||
description: Set up for windows jobs
|
||||
|
||||
inputs:
|
||||
cuda-version:
|
||||
description: which cuda version to install, 'cpu' for none
|
||||
required: true
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
function get_ec2_metadata() {
|
||||
# Pulled from instance metadata endpoint for EC2
|
||||
# see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
|
||||
category=$1
|
||||
curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
|
||||
}
|
||||
echo "ami-id: $(get_ec2_metadata ami-id)"
|
||||
echo "instance-id: $(get_ec2_metadata instance-id)"
|
||||
echo "instance-type: $(get_ec2_metadata instance-type)"
|
||||
echo "system info $(uname -a)"
|
||||
|
||||
# Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
|
||||
- name: Enable long paths on Windows
|
||||
shell: powershell
|
||||
run: |
|
||||
Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
|
||||
|
||||
# Since it's just a defensive command, the workflow should continue even the command fails
|
||||
- name: Disables Windows Defender scheduled and real-time scanning for files in pytorch directory.
|
||||
shell: powershell
|
||||
run: |
|
||||
Add-MpPreference -ExclusionPath $(Get-Location).tostring() -ErrorAction Ignore
|
||||
|
||||
- name: Install Visual Studio 2019 toolchain
|
||||
shell: powershell
|
||||
env:
|
||||
VS_VERSION: "16.8.6"
|
||||
INSTALL_WINDOWS_SDK: "1"
|
||||
run: |
|
||||
.\.circleci\scripts\vs_install.ps1
|
||||
|
||||
- name: Install CUDA and CUDNN
|
||||
shell: bash
|
||||
if: inputs.cuda-version != 'cpu'
|
||||
env:
|
||||
CUDA_VERSION: ${{ inputs.cuda-version }}
|
||||
run: |
|
||||
.circleci/scripts/windows_cuda_install.sh
|
||||
.circleci/scripts/windows_cudnn_install.sh
|
||||
|
||||
- name: Setup Python3
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: "3.x"
|
||||
cache: pip
|
||||
cache-dependency-path: |
|
||||
**/requirements.txt
|
||||
**/.circleci/docker/requirements-ci.txt
|
||||
**/.github/requirements-gha-cache.txt
|
||||
33
.github/actions/teardown-win/action.yml
vendored
33
.github/actions/teardown-win/action.yml
vendored
@ -1,33 +0,0 @@
|
||||
name: Teardown Windows
|
||||
|
||||
description: Set up Docker workspace on linux
|
||||
|
||||
inputs:
|
||||
extra-delete-dir:
|
||||
description: If set, cleaning up the workspace will delete this too
|
||||
required: false
|
||||
default: ""
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Wait until all sessions have drained
|
||||
shell: powershell
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\wait_for_ssh_to_drain.ps1
|
||||
|
||||
- name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
|
||||
shell: powershell
|
||||
if: always()
|
||||
run: |
|
||||
.github\scripts\kill_active_ssh_sessions.ps1
|
||||
|
||||
- name: Cleanup workspace
|
||||
if: always()
|
||||
shell: bash
|
||||
env:
|
||||
EXTRA_DELETE_DIR: ${{ inputs.extra-delete-dir }}
|
||||
run: |
|
||||
[ ! -z "${EXTRA_DELETE_DIR}" ] || rm -rf "${EXTRA_DELETE_DIR}"
|
||||
rm -rf ./*
|
||||
41
.github/actions/test-pytorch-binary/action.yml
vendored
41
.github/actions/test-pytorch-binary/action.yml
vendored
@ -1,41 +0,0 @@
|
||||
name: Test pytorch binary
|
||||
|
||||
description: Pulls the docker image and tests the pytorch binary using it. All env variable referenced in the "Test PyTorch binary" step must be set in the GITHUB_ENV file
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Test PyTorch binary
|
||||
shell: bash
|
||||
run: |
|
||||
set -x
|
||||
# shellcheck disable=SC2086,SC2090
|
||||
container_name=$(docker run \
|
||||
${GPU_FLAG:-} \
|
||||
-e BINARY_ENV_FILE \
|
||||
-e BUILDER_ROOT \
|
||||
-e BUILD_ENVIRONMENT \
|
||||
-e BUILD_SPLIT_CUDA \
|
||||
-e DESIRED_CUDA \
|
||||
-e DESIRED_DEVTOOLSET \
|
||||
-e DESIRED_PYTHON \
|
||||
-e GITHUB_ACTIONS \
|
||||
-e GPU_ARCH_TYPE \
|
||||
-e GPU_ARCH_VERSION \
|
||||
-e LIBTORCH_VARIANT \
|
||||
-e PACKAGE_TYPE \
|
||||
-e PYTORCH_FINAL_PACKAGE_DIR \
|
||||
-e PYTORCH_ROOT \
|
||||
-e SKIP_ALL_TESTS \
|
||||
--tty \
|
||||
--detach \
|
||||
-v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \
|
||||
-v "${GITHUB_WORKSPACE}/builder:/builder" \
|
||||
-v "${RUNNER_TEMP}/artifacts:/final_pkgs" \
|
||||
-w / \
|
||||
"${DOCKER_IMAGE}"
|
||||
)
|
||||
docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh"
|
||||
# Generate test script
|
||||
docker exec -t -w "${PYTORCH_ROOT}" -e OUTPUT_SCRIPT="/run.sh" "${container_name}" bash -c "bash .circleci/scripts/binary_linux_test.sh"
|
||||
docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash -x /run.sh"
|
||||
131
.github/actions/upload-test-artifacts/action.yml
vendored
131
.github/actions/upload-test-artifacts/action.yml
vendored
@ -1,131 +0,0 @@
|
||||
name: Upload test artifacts
|
||||
|
||||
description: Upload various artifacts produced by our testing process
|
||||
|
||||
inputs:
|
||||
use-gha:
|
||||
description: If set to any value, upload GHA. Otherwise upload to S3.
|
||||
required: false
|
||||
file-suffix:
|
||||
description: |
|
||||
Suffix to add to the filename of the artifacts. This should include the
|
||||
workflow job id, see [Job id in artifacts].
|
||||
required: true
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
# Mac/Linux zip
|
||||
- name: Zip JSONs for upload
|
||||
if: runner.os != 'Windows' && !inputs.use-gha
|
||||
shell: bash
|
||||
env:
|
||||
FILE_SUFFIX: ${{ inputs.file-suffix }}
|
||||
run: |
|
||||
# Remove any previous test jsons if they exist
|
||||
rm -f test-jsons-*.zip
|
||||
zip -r "test-jsons-${FILE_SUFFIX}.zip" test -i '*.json'
|
||||
|
||||
- name: Zip test reports for upload
|
||||
if: runner.os != 'Windows' && !inputs.use-gha
|
||||
shell: bash
|
||||
env:
|
||||
FILE_SUFFIX: ${{ inputs.file-suffix }}
|
||||
run: |
|
||||
# Remove any previous test reports if they exist
|
||||
rm -f test-reports-*.zip
|
||||
zip -r "test-reports-${FILE_SUFFIX}.zip" test -i '*.xml'
|
||||
|
||||
- name: Zip usage log for upload
|
||||
if: runner.os != 'Windows' && !inputs.use-gha
|
||||
shell: bash
|
||||
env:
|
||||
FILE_SUFFIX: ${{ inputs.file-suffix }}
|
||||
run: |
|
||||
# Remove any previous test reports if they exist
|
||||
rm -f usage-log-*.zip
|
||||
# this workflow is also run in bazel build test, but we dont generate usage reports for it
|
||||
# so check to see if the file exists first
|
||||
if [ -f 'usage_log.txt' ]; then
|
||||
zip "usage-log-${FILE_SUFFIX}.zip" 'usage_log.txt'
|
||||
fi
|
||||
|
||||
# Windows zip
|
||||
- name: Zip JSONs for upload
|
||||
if: runner.os == 'Windows' && !inputs.use-gha
|
||||
shell: powershell
|
||||
env:
|
||||
FILE_SUFFIX: ${{ inputs.file-suffix }}
|
||||
run: |
|
||||
# -ir => recursive include all files in pattern
|
||||
7z a "test-jsons-$Env:FILE_SUFFIX.zip" -ir'!test\*.json'
|
||||
|
||||
- name: Zip test reports for upload
|
||||
if: runner.os == 'Windows' && !inputs.use-gha
|
||||
shell: powershell
|
||||
env:
|
||||
FILE_SUFFIX: ${{ inputs.file-suffix }}
|
||||
run: |
|
||||
# -ir => recursive include all files in pattern
|
||||
7z a "test-reports-$Env:FILE_SUFFIX.zip" -ir'!test\*.xml'
|
||||
|
||||
- name: Zip usage log for upload
|
||||
if: runner.os == 'Windows' && !inputs.use-gha
|
||||
shell: powershell
|
||||
env:
|
||||
FILE_SUFFIX: ${{ inputs.file-suffix }}
|
||||
run: |
|
||||
# -ir => recursive include all files in pattern
|
||||
7z a "usage-log-$Env:FILE_SUFFIX.zip" 'usage_log.txt'
|
||||
|
||||
# S3 upload
|
||||
- name: Store Test Downloaded JSONs on S3
|
||||
uses: seemethere/upload-artifact-s3@v5
|
||||
if: ${{ !inputs.use-gha }}
|
||||
with:
|
||||
s3-prefix: |
|
||||
${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact
|
||||
retention-days: 14
|
||||
if-no-files-found: warn
|
||||
path: test-jsons-*.zip
|
||||
|
||||
- name: Store Test Reports on S3
|
||||
uses: seemethere/upload-artifact-s3@v5
|
||||
if: ${{ !inputs.use-gha }}
|
||||
with:
|
||||
s3-prefix: |
|
||||
${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: test-reports-*.zip
|
||||
|
||||
- name: Store Usage Logs on S3
|
||||
uses: seemethere/upload-artifact-s3@v5
|
||||
if: ${{ !inputs.use-gha }}
|
||||
with:
|
||||
s3-prefix: |
|
||||
${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact
|
||||
retention-days: 14
|
||||
if-no-files-found: ignore
|
||||
path: usage-log-*.zip
|
||||
|
||||
# GHA upload
|
||||
- name: Store Test Downloaded JSONs on Github
|
||||
uses: actions/upload-artifact@v2
|
||||
if: inputs.use-gha
|
||||
with:
|
||||
# Add the run attempt, see [Artifact run attempt]
|
||||
name: test-jsons-runattempt${{ github.run_attempt }}-${{ inputs.file-suffix }}.zip
|
||||
retention-days: 14
|
||||
if-no-files-found: warn
|
||||
path: test/**/*.json
|
||||
|
||||
- name: Store Test Reports on Github
|
||||
uses: actions/upload-artifact@v2
|
||||
if: inputs.use-gha
|
||||
with:
|
||||
# Add the run attempt, see [Artifact run attempt]
|
||||
name: test-reports-runattempt${{ github.run_attempt }}-${{ inputs.file-suffix }}.zip
|
||||
retention-days: 14
|
||||
if-no-files-found: error
|
||||
path: test/**/*.xml
|
||||
27
.github/auto_request_review.yml
vendored
27
.github/auto_request_review.yml
vendored
@ -1,27 +0,0 @@
|
||||
# Documented at https://github.com/necojackarc/auto-request-review
|
||||
reviewers:
|
||||
groups:
|
||||
symbolic-shapes:
|
||||
- ezyang
|
||||
- Chillee
|
||||
- wconstab
|
||||
- anjali411
|
||||
- albanD
|
||||
- Krovatkin
|
||||
- miladm
|
||||
|
||||
per_author:
|
||||
symbolic-shapes:
|
||||
- symbolic-shapes
|
||||
- antoniojkim
|
||||
|
||||
files:
|
||||
# none yet, TODO: migrate CODEOWNERS here
|
||||
|
||||
options:
|
||||
ignore_draft: true
|
||||
ignored_keywords:
|
||||
- DO NOT REVIEW
|
||||
# Just manually setup a self-referential per_author rule if you
|
||||
# want group assignment
|
||||
enable_group_assignment: false
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user