Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-25 16:14:55 +08:00)
Compare commits: sqzhang_fl ... release/1. (43 commits)
| SHA1 |
|---|
| 8d365ae940 |
| bc2c6edaf1 |
| 46f85865c0 |
| a556333dfa |
| 3c14fe2151 |
| 055052bf64 |
| 68ef2a2188 |
| 9647fb7d18 |
| e4944871c8 |
| bbf2c0e3c6 |
| e6e8877bc2 |
| eaa80c6fd8 |
| 7fa092949e |
| 74cd18623e |
| dad4c2d032 |
| 565742cb63 |
| 7acb591cf9 |
| a5d5a6ad4f |
| ea5089751f |
| d216c83667 |
| f7e0ca546c |
| 89ee69e173 |
| e0aad8e864 |
| 28ad47f553 |
| 2cc3c2ef38 |
| b0037f707f |
| b6a3176c1c |
| 5fac320809 |
| 1f406fe91d |
| 6a46b2e2aa |
| 503a0923d3 |
| 6641e9b75f |
| 4f9f0e7a13 |
| 0ea924fc98 |
| 5a78725c29 |
| f72151b900 |
| 8380187819 |
| 7cc129e60c |
| ff6c348762 |
| 03a283b2b1 |
| 614e765575 |
| 7b0e140ecc |
| 3fab33e1c9 |
63
.azure_pipelines/build-pipeline.yml
Normal file
@@ -0,0 +1,63 @@
# PyTorch CI Builds Pipeline on Azure DevOps
#
# This pipeline:
#   1) builds PyTorch on select configurations
#   2) runs only TestTorch unit tests.

stages:
- stage: 'Build'
  displayName: 'Build PyTorch'
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_ci_build: True
      os: ubuntu
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_ci_build: True
      os: ubuntu
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      build_stage: True
      is_ci_build: True
      os: windows
      cuda: cpu
      customMatrixes:
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      build_stage: True
      is_ci_build: True
      os: windows
      cuda: gpu
      customMatrixes:
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
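Note: the build_stage / is_ci_build flags above are consumed by the job templates as compile-time template expressions, so the corresponding steps are included or dropped when the pipeline is expanded rather than skipped at runtime. A minimal sketch of that gating pattern (simplified and illustrative only, not the actual template; parameter and step names are assumptions):

parameters:
  is_ci_build: False

steps:
- ${{ if eq(parameters.is_ci_build, 'True') }}:
  # This step exists in the expanded job only when is_ci_build is 'True'.
  - bash: python test/test_torch.py TestTorch
    displayName: Run TestTorch unit tests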
82
.azure_pipelines/daily-pipeline.yml
Normal file
@@ -0,0 +1,82 @@
# PyTorch Daily Builds Pipeline on Azure DevOps
#
# This pipeline:
#   1) builds PyTorch on all available configurations
#   2) runs all PyTorch unit tests

stages:
- stage: 'BuildTest'
  displayName: 'Build and Test PyTorch'
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_daily_build: True
      os: ubuntu
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_daily_build: True
      os: ubuntu
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      build_stage: True
      is_daily_build: True
      os: windows
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      build_stage: True
      is_daily_build: True
      os: windows
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101
134
.azure_pipelines/job_templates/build-verify-publish-template-unix.yml
Normal file
@@ -0,0 +1,134 @@
# PyTorch build steps template with Unix images Azure DevOps Instances
#
# This build depends on 3 parameters set as environment variables in the pipeline:
#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
#   - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
#   - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts

parameters:
  name: ''
  pool: ''
  container_endpoint: ''
  os: ''
  cuda: ''
  is_ci_build: False
  is_official_build: False
  is_daily_build: False
  build_stage: False
  verify_stage: False
  publish_stage: False
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 300
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  variables:
    DECODE_PERCENTS: false
  container:
    image: $[variables['container_image']]
    endpoint: ${{parameters.container_endpoint}}

  steps:
  # Build stage
  - ${{ if eq(parameters.build_stage, 'True') }}:
    # Set up environment variables for specific pipeline build
    - template: set-environment-variables.yml
      parameters:
        os: ${{ parameters.os}}
        cuda: ${{ parameters.cuda}}
        is_official_build: ${{ parameters.is_official_build}}

    # Sync and update PyTorch submodules
    - bash: git submodule update --init --recursive --jobs 0
      displayName: Update PyTorch submodules

    # Build PyTorch and run unit tests - no packaging
    - ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
      # Build PyTorch from source in develop mode
      - bash: python setup.py develop
        displayName: Build PyTorch from source

    - ${{ if eq(parameters.is_ci_build, 'True') }}:
      # Run TestTorch unit tests to demonstrate successful PyTorch build
      - bash: python test/test_torch.py TestTorch
        displayName: Run TestTorch unit tests

    - ${{ if eq(parameters.is_daily_build, 'True') }}:
      # Run all unit tests to demonstrate successful PyTorch build
      - bash: python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
        displayName: Run all unit tests

    # Run ComponentGovernance
    - task: ComponentGovernanceComponentDetection@0
      inputs:
        scanType: 'Register'
        verbosity: 'Verbose'
        alertWarningLevel: 'High'

    # Build PyTorch and produce artifacts for verification stage
    - ${{ if eq(parameters.is_official_build, 'True') }}:
      # Build PyTorch from source in install mode and exclude test binaries
      - bash: python setup.py install
        displayName: Build PyTorch from source without test binaries

      # Package PyTorch Wheel
      - bash: python setup.py bdist_wheel
        displayName: Package PyTorch Wheel

      # Publish PyTorch Wheel
      - task: PublishPipelineArtifact@1
        inputs:
          targetPath: $(Build.SourcesDirectory)/dist/
          artifactName: Build_$(Build.BuildNumber)_$(configuration)
        displayName: Publish PyTorch Wheel to Pipeline Artifacts

  # Verification stage
  - ${{ if eq(parameters.verify_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)/verify
      displayName: Download PyTorch Wheel

    # Install PyTorch Wheel
    - bash: python -m pip install $(Build.SourcesDirectory)/verify/torch*linux*.whl
      displayName: Install PyTorch Wheel

    # Ensure PyTorch installed correctly from produced wheel
    - bash: |
        cd $(Build.SourcesDirectory)/verify
        python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
      displayName: Check PyTorch correctly installed from wheel

  # Publishing stage
  - ${{ if eq(parameters.publish_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)/publish
      displayName: Download PyTorch Wheel

    # Publish wheel to Azure Artifacts
    # The flag continueOnError=true is needed as the artifact to be published
    # may already exist, because the artifact is differentiated based on the
    # last commit date.
    - bash: |
        export TORCH_VERSION=$(head -c 5 ./version.txt)
        export LAST_COMMIT=$(git rev-parse --short HEAD)
        export LAST_COMMIT_DATE=$(git log -1 --pretty=%ad --date=format:%Y%m%d)
        cd $(Build.SourcesDirectory)/publish
        export TORCH_WHEEL=$(echo torch*linux*whl)
        az extension add -n azure-devops
        echo $ADOTOKEN | az devops login
        az artifacts universal publish --organization $AZURE_DEVOPS_ARTIFACTS_ORGANIZATION --project $AZURE_DEVOPS_ARTIFACTS_PROJECT --scope project --feed "PyTorch" --name $TORCH_WHEEL --description "PyTorch Official Build Artifact" --version $TORCH_VERSION-$LAST_COMMIT_DATE-$LAST_COMMIT --path .
      env:
        ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
      continueOnError: true
      displayName: Upload PyTorch Official Build package to Azure Artifacts
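Note: the ${{ insert }}: ${{parameters.customMatrixes}} line in this template splices whatever mapping the calling pipeline passes as customMatrixes directly into the job's strategy.matrix, so each key (Py_38, Py_39_CUDA_112_cuDNN_810, ...) becomes one matrix leg and its entries (configuration, container_image, CUDA_VERSION) become that leg's variables, later readable as $(configuration) and so on. A minimal sketch of the expansion, assuming the Py_38 entry from build-pipeline.yml (the expanded form shown is illustrative):

# Template input:
#   customMatrixes:
#     Py_38:
#       configuration: ubuntu_1804_py_38_cpu
# After ${{ insert }} expansion the job effectively carries:
strategy:
  matrix:
    Py_38:
      configuration: ubuntu_1804_py_38_cpu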
150
.azure_pipelines/job_templates/build-verify-publish-template-win.yml
Normal file
@@ -0,0 +1,150 @@
# PyTorch build steps template with Windows images Azure DevOps Instances
#
# This build depends on 3 parameters set as environment variables in the pipeline:
#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
#   - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
#   - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts

parameters:
  name: ''
  pool: ''
  os: ''
  cuda: ''
  is_ci_build: False
  is_official_build: False
  is_daily_build: False
  build_stage: False
  verify_stage: False
  publish_stage: False
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 300
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  variables:
    CMAKE_GENERATOR: Ninja
    PACKAGE_PDBS: 0

  steps:
  # Prepare for PyTorch build on Windows
  - template: prepare-build-template.yml
    parameters:
      configuration: $(configuration)
      build_stage: ${{ parameters.build_stage}}

  # Build Stage
  - ${{ if eq(parameters.build_stage, 'True') }}:
    # Set up environment variables for specific pipeline build
    - template: set-environment-variables.yml
      parameters:
        os: ${{ parameters.os}}
        cuda: ${{ parameters.cuda}}
        is_official_build: ${{ parameters.is_official_build}}

    # Sync and update PyTorch submodules
    - script: git submodule update --init --recursive --jobs 0
      displayName: Update PyTorch submodules

    # Build PyTorch and run unit tests - no packaging
    - ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
      # Build PyTorch from source in develop mode with Ninja
      - script: call activate $(configuration) && python setup.py develop
        displayName: Build PyTorch from source

    - ${{ if eq(parameters.is_ci_build, 'True') }}:
      # Run TestTorch unit tests to demonstrate successful PyTorch build
      - script: call activate $(configuration) && python test\test_torch.py TestTorch
        displayName: Run TestTorch unit tests

    - ${{ if eq(parameters.is_daily_build, 'True') }}:
      # Run all unit tests to demonstrate successful PyTorch build
      - script: call activate $(configuration) && python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
        displayName: Run all unit tests

    # Run ComponentGovernance
    - task: ComponentGovernanceComponentDetection@0
      inputs:
        scanType: 'Register'
        verbosity: 'Verbose'
        alertWarningLevel: 'High'

    # Build PyTorch and produce artifacts for verification stage
    - ${{ if eq(parameters.is_official_build, 'True') }}:
      # Build PyTorch from source in install mode with Ninja and exclude test binaries
      - script: call activate $(configuration) && python setup.py install
        displayName: Build PyTorch from source without test binaries

      # Package PyTorch Wheel
      - script: call activate $(configuration) && python setup.py bdist_wheel
        displayName: Package PyTorch Wheel

      # Publish PyTorch Wheel
      - task: PublishPipelineArtifact@1
        inputs:
          targetPath: $(Build.SourcesDirectory)\dist\
          artifactName: Build_$(Build.BuildNumber)_$(configuration)
        displayName: Publish PyTorch Wheel to Pipeline Artifacts

  # Verification Stage
  - ${{ if eq(parameters.verify_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)\verify
      displayName: Download PyTorch Wheel

    # Install PyTorch Wheel on Windows
    - script: |
        call activate $(configuration)
        cd $(Build.SourcesDirectory)\verify
        dir torch*win*.whl /b > whl.txt
        set /p whl= < whl.txt
        python -m pip install %whl%
      displayName: Install PyTorch Wheel

    # Ensure PyTorch installed correctly from produced wheel
    - script: |
        call activate $(configuration)
        cd $(Build.SourcesDirectory)\verify
        python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
      displayName: Check PyTorch correctly installed from wheel

  # Publishing stage
  - ${{ if eq(parameters.publish_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)\publish
      displayName: Download PyTorch Wheel

    # Set up Azure Artifacts for Windows
    # The pip install --upgrade command is a bug fix for Azure CLI on Windows
    # More info: https://github.com/Azure/azure-cli/issues/16858
    - script: |
        pip install --upgrade pip --target \opt\az\lib\python3.6\site-packages\
        az extension add -n azure-devops
      displayName: Set up Azure Artifacts download on Windows

    # Publish wheel to Azure Artifacts
    # The flag continueOnError=true is needed as the artifact to be published
    # may already exist, because the artifact is differentiated based on the
    # last commit date.
    - script: |
        set /p TORCH_VERSION= < version.txt
        cd $(Build.SourcesDirectory)\publish
        git rev-parse --short HEAD > last_commit.txt && set /p LAST_COMMIT= < last_commit.txt
        git log -1 --pretty=%ad --date=format:%Y%m%d > last_commit_date.txt && set /p LAST_COMMIT_DATE= < last_commit_date.txt
        dir torch*win*.whl /b > whl.txt && set /p TORCH_WHEEL= < whl.txt
        echo %ADOTOKEN% | az devops login
        az artifacts universal publish --organization %AZURE_DEVOPS_ARTIFACTS_ORGANIZATION% --project %AZURE_DEVOPS_ARTIFACTS_PROJECT% --scope project --feed "PyTorch" --name %TORCH_WHEEL% --description "PyTorch Official Build Artifact" --version %TORCH_VERSION:~0,5%-%LAST_COMMIT_DATE%-%LAST_COMMIT% --path .
      env:
        ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
      continueOnError: true
      displayName: Upload PyTorch nightly package to Azure Artifacts
17
.azure_pipelines/job_templates/common-packages.yml
Normal file
@@ -0,0 +1,17 @@
dependencies:
  - python=PYTHON_VERSION
  - numpy
  - ninja
  - pyyaml
  - mkl
  - mkl-include
  - setuptools
  - cmake
  - cffi
  - typing_extensions
  - future
  - six
  - requests
  - dataclasses
  - pip:
    - -r ../../requirements.txt
26
.azure_pipelines/job_templates/notify-webapp-template.yml
Normal file
@@ -0,0 +1,26 @@
parameters:
  name: ''
  pool: ''
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 600
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  steps:
  # Clone PyTorch Tests repository
  - bash: |
      B64_PAT=$(echo -n ":$_ADOTOKEN" | base64)
      git -c http.extraHeader="Authorization: Basic ${B64_PAT}" clone $(AZURE_DEVOPS_PYTORCH_TESTS_REPO_URL)
      cd pytorch_tests
      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
    displayName: Clone PyTorch Tests repo
  - bash: |
      bash $(Build.SourcesDirectory)/pytorch_tests/webapp/notify_webapp.sh
    displayName: Notify Webapp
62
.azure_pipelines/job_templates/prepare-build-template.yml
Normal file
@@ -0,0 +1,62 @@
# Build prepare steps for PyTorch on Azure DevOps to build from source.
# These steps are shared between the normal build process and Semmle security scan tasks.

parameters:
  build_stage: False
  configuration: ''

steps:
# End Python tasks that may be lingering over from previous runs
# Note: If python.exe isn't currently running, exit code becomes 128,
# which fails the run. Here exit code is set to 0 to avoid failed run.
- script: |
    taskkill /f /im python.exe
    IF %ERRORLEVEL% EQU 128 exit 0
  displayName: End previous Python processes

# Clean up env directory in conda for fresh builds and set up conda environment YAML
- powershell: |
    Remove-Item 'C:\Miniconda\envs' -Recurse -ErrorAction Ignore
    $env:PYTHON_VERSION = $env:SYSTEM_JOBNAME.Substring(3,1) + '.' + $env:SYSTEM_JOBNAME.Substring(4,1)
    (Get-Content .azure_pipelines\job_templates\common-packages.yml) -replace 'PYTHON_VERSION', $env:PYTHON_VERSION | Out-File -encoding ASCII .azure_pipelines\job_templates\common-packages.yml
  displayName: Clean up previous environments and Set up conda environment YAML

# Make conda environment and install required packages
- script: |
    call conda clean --all -y
    call conda env create -n $(configuration) --file .azure_pipelines\job_templates\common-packages.yml
    call activate $(configuration)
    call conda install -c conda-forge libuv=1.39
  displayName: Set up conda environment for building from source

- ${{ if eq(parameters.build_stage, 'True') }}:
  # Install MKL
  - script: |
      rmdir /s /q mkl
      del mkl_2020.2.254.7z
      curl https://s3.amazonaws.com/ossci-windows/mkl_2020.2.254.7z -k -O
      7z x -aoa mkl_2020.2.254.7z -omkl
    displayName: Install MKL

  # Install sccache and randomtemp
  # Related PyTorch GitHub issue: https://github.com/pytorch/pytorch/issues/25393
  # Related fix: https://github.com/pytorch/builder/pull/448/
  - script: |
      mkdir .\tmp_bin
      curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output .\tmp_bin\sccache.exe
      curl -k https://s3.amazonaws.com/ossci-windows/sccache-cl.exe --output .\tmp_bin\sccache-cl.exe
      copy .\tmp_bin\sccache.exe .\tmp_bin\nvcc.exe
      curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.4/randomtemp.exe --output .\tmp_bin\randomtemp.exe
    displayName: Install sccache and randomtemp
    condition: not(eq(variables.CUDA_VERSION, ''))

  # CUDA 11.2's CUB directory conflicts with CUDA 10.2 and 10.1
  # builds, where CUDA 11.2's CUB is injected into non-CUDA
  # 11.2 builds.
  - powershell: Remove-Item "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include\cub" -Recurse -ErrorAction Ignore
    displayName: Remove conflicting CUB from CUDA installation
    condition: not(eq(variables.CUDA_VERSION, ''))

  - powershell: Copy-Item -Path "F:\cuda_11_2\cub\" -Destination "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include" -Recurse
    displayName: Copy CUDA CUB for CUDA 11.2 build
    condition: eq(variables.CUDA_VERSION, '112')
61
.azure_pipelines/job_templates/pytorch-template-unix.yml
Normal file
@@ -0,0 +1,61 @@
# PyTorch build steps template with Unix images Azure DevOps Instances
#
# This build depends on 5 parameters set as environment variables in the pipeline:
#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
#   - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
#   - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests

parameters:
  name: ''
  pool: ''
  container_endpoint: ''
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 600
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  variables:
    DECODE_PERCENTS: false

  steps:
  # Don't checkout repo contents to save time and CPU compute. Environment variables
  # related to checkout branch such as $(BUILD_SOURCEBRANCH) are still available.
  - checkout: none

  # Delete pytorch_tests repo from previous builds if exists
  - bash: rm -rf pytorch_tests/
    displayName: Delete pytorch_tests repo from previous builds if exists

  # Clone PyTorch Tests repository
  - bash: |
      B64_PAT=$(echo -n ":$_ADOTOKEN" | base64)
      git -c http.extraHeader="Authorization: Basic ${B64_PAT}" clone $(AZURE_DEVOPS_PYTORCH_TESTS_REPO_URL)
      cd pytorch_tests
      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
    displayName: Clone PyTorch Tests repo

  # Run PyTorch Unit Tests
  - bash: bash $(Build.SourcesDirectory)/pytorch_tests/scripts/linux/run.sh
    env:
      _AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
      _TS_CLONE_P: $(TS_CLONE_PASSWORD)
      _TS_P: $(TS_PAT)
      _TS_SM_P: $(TS_SM_PAT)
      _AZUREML_CLONE_PASSWORD: $(AZUREML_CLONE_PASSWORD)
      _SPPASSWORD: $(SPPASSWORD)
    displayName: Run PyTorch Unit Tests

  # Tests results are available outside the docker container since
  # the current directory is mounted as a volume of the container.
  - task: PublishTestResults@2
    condition: always()
    inputs:
      testResultsFiles: '**/test-*.xml'
      testRunTitle: 'Publish test results for Python'
57
.azure_pipelines/job_templates/pytorch-template-win.yml
Normal file
@@ -0,0 +1,57 @@
# PyTorch build steps template with Windows images Azure DevOps Instances
#
# This build depends on 5 parameters set as environment variables in the pipeline:
#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
#   - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
#   - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests

parameters:
  name: ''
  pool: ''
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 600
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}

  steps:
  # Don't checkout repo contents to save time and CPU compute. Environment variables
  # related to checkout branch such as $(BUILD_SOURCEBRANCH) are still available.
  - checkout: none

  # Delete pytorch_tests repo from previous builds if exists
  - script: if exist "pytorch_tests/" rmdir "pytorch_tests/" /q /s
    displayName: Delete pytorch_tests repo from previous builds if exists

  # Clone PyTorch Tests repository
  - powershell: |
      $env:B64Pat = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes(":$env:_ADOTOKEN"))
      git -c http.extraHeader="Authorization: Basic $env:B64Pat" clone $env:AZURE_DEVOPS_pytorch_tests_REPO_URL
      cd pytorch_tests
      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
    displayName: Clone PyTorch Tests repo

  # Run PyTorch Unit Tests
  - script: call $(Build.SourcesDirectory)\pytorch_tests\scripts\windows\run.bat
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
      _AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
      _TS_CLONE_P: $(TS_CLONE_PASSWORD)
      _TS_P: $(TS_PAT)
      _TS_SM_P: $(TS_SM_PAT)
    displayName: Run PyTorch Unit Tests

  # Tests results are available outside the docker container since
  # the current directory is mounted as a volume of the container.
  - task: PublishTestResults@2
    condition: always()
    inputs:
      testResultsFiles: '**\test-*.xml'
      testRunTitle: 'Publish test results for Python'
129
.azure_pipelines/job_templates/set-environment-variables.yml
Normal file
@@ -0,0 +1,129 @@
# Set environment variables for specific configurations

parameters:
  is_official_build: False
  os: ''
  cuda: ''

steps:
# Environment configuration steps for Ubuntu builds
- ${{ if contains(parameters.os, 'ubuntu') }}:
  # Set configuration specific build flags
  - ${{ if eq(parameters.is_official_build, True) }}:
    - bash: |
        echo "##vso[task.setvariable variable=INSTALL_TEST;]0"
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
        export PYTORCH_VERSION=$(head -c 5 ./version.txt)
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
      displayName: Set configuration-specific build flags

  # Set PyTorch CPU/GPU build flags.
  - ${{ if contains(parameters.cuda, 'cpu') }}:
    - bash: |
        echo "##vso[task.setvariable variable=USE_CUDA;]0"
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
      displayName: Set CUDA-specific build flag for CPU builds

  - ${{ if contains(parameters.cuda, 'gpu') }}:
    - bash: |
        echo "##vso[task.setvariable variable=USE_CUDA;]1"
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
      displayName: Set CUDA-specific build flag for GPU builds

  # Set MKL environment variables
  - bash: |
      echo "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]/opt/intel/lib:$CMAKE_LIBRARY_PATH"
      echo "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]/opt/intel/include:$CMAKE_INCLUDE_PATH"
    displayName: Set MKL paths

  # View current environment variables
  - bash:
      printenv
    displayName: Show environment variables

# Environment configuration steps for Windows builds
- ${{ if contains(parameters.os, 'windows') }}:
  # Set Conda Lib Path
  - powershell: Write-Host "##vso[task.setvariable variable=CONDA_LIB_PATH;]C:\Miniconda\envs\$(configuration)\Library\bin"
    displayName: Set Conda Lib Path

  # Set configuration specific build flags
  - ${{ if eq(parameters.is_official_build, True) }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=INSTALL_TEST;]0"
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
        Set-Variable -Name PYTORCH_VERSION -Value (Get-Content .\version.txt).Substring(0,5)
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
      displayName: Set configuration-specific build flags

  # Set PyTorch CPU/GPU build flags.
  - ${{ if contains(parameters.cuda, 'cpu') }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=USE_CUDA;]0"
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
      displayName: Set CUDA-specific build flag for CPU build

  - ${{ if contains(parameters.cuda, 'gpu') }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=USE_CUDA;]1"
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
      displayName: Set CUDA-specific build flag for GPU build

  # Set CUDA 11.2, 10.2 or 10.1 specific build flags
  - ${{ if eq(parameters.cuda, 'gpu') }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5;8.0;8.6"
        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\"
      displayName: Set CUDA 11.2 specific build flags
      condition: eq(variables.CUDA_VERSION, '112')

    - powershell: |
        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\"
      displayName: Set CUDA 10.2 specific build flags
      condition: eq(variables.CUDA_VERSION, '102')

    - powershell: |
        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\"
      displayName: Set CUDA 10.1 specific build flags
      condition: eq(variables.CUDA_VERSION, '101')

    - powershell: |
        Write-Host "##vso[task.setvariable variable=CUDA_BIN_PATH;]$env:CUDA_PATH\bin\"
        Write-Host "##vso[task.setvariable variable=CUDNN_ROOT;]$env:CUDA_PATH"
        Write-Host "##vso[task.setvariable variable=CUDNN_INCLUDE_DIR;]$env:CUDA_PATH\include\"
        Write-Host "##vso[task.setvariable variable=CUDNN_LIBRARY;]$env:CUDA_PATH\lib\x64\"
        Write-Host "##vso[task.prependpath]$env:CUDA_PATH\bin"
        Write-Host "##vso[task.setvariable variable=TORCH_NVCC_FLAGS;]-Xfatbin -compress-all --no-host-device-move-forward"
        Write-Host "##vso[task.setvariable variable=THRUST_IGNORE_CUB_VERSION_CHECK;]1"
        Write-Host "##vso[task.setvariable variable=NVTOOLSEXT_PATH;]C:\Program Files\NVIDIA Corporation\NvToolsExt\"
      displayName: Set CUDA environment variables

    - powershell: |
        copy "$(CUDA_BIN_PATH)\cusparse*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cublas*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cudart*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\curand*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cufft*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cusolver*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cudnn*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\nvrtc*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64\nvToolsExt64_1.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CONDA_LIB_PATH)\libiomp*5md.dll" $(Build.SourcesDirectory)\torch\lib
        copy "$(CONDA_LIB_PATH)\uv.dll" $(Build.SourcesDirectory)\torch\lib
      displayName: Copy CUDA/cuDNN/libomp/libuv dlls to torch\lib

  # Set MKL, sccache and randomtemp environment variables
  - powershell: |
      Write-Host "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]$(Build.SourcesDirectory)\mkl\include"
      Write-Host "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]$(Build.SourcesDirectory)\mkl\lib;$env:CMAKE_LIBRARY_PATH"
      Write-Host "##vso[task.setvariable variable=ADDITIONAL_PATH;]$(Build.SourcesDirectory)\tmp_bin"
      Write-Host "##vso[task.setvariable variable=SCCACHE_IDLE_TIMEOUT;]1500"
      Write-Host "##vso[task.setvariable variable=CMAKE_CUDA_COMPILER_LAUNCHER;]$(Build.SourcesDirectory)/tmp_bin/randomtemp.exe;$(Build.SourcesDirectory)/tmp_bin/sccache.exe"
    displayName: Set MKL, sccache and randomtemp environment variables

  # View current environment variables
  - script:
      set
    displayName: Show environment variables
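Note: the echo "##vso[task.setvariable ...]" / Write-Host "##vso[task.setvariable ...]" calls above are Azure Pipelines logging commands: the agent parses the ##vso[...] marker on stdout and turns it into a pipeline variable that later steps in the same job can read as an environment variable. A minimal two-step sketch of that mechanism (the variable name EXAMPLE_FLAG is illustrative only):

steps:
- bash: echo "##vso[task.setvariable variable=EXAMPLE_FLAG;]1"
  displayName: Set a variable for later steps
- bash: echo "EXAMPLE_FLAG is $EXAMPLE_FLAG"  # prints "EXAMPLE_FLAG is 1"
  displayName: Read the variable set by the previous step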
14
.azure_pipelines/job_templates/wheel-wait-job-template.yml
Normal file
@@ -0,0 +1,14 @@
# Main logic to initiate wait for PR artifact to be ready

steps:
- task: InvokeRESTAPI@1
  displayName: 'Wait for job success and wheel ready'
  timeoutInMinutes: 60
  inputs:
    connectionType: 'connectedServiceName'
    serviceConnection: circleciconn
    method: 'POST'
    headers: '{"Content-Type":"application/json", "BranchName":"$(_TARGET_BRANCH_TO_CHECK)", "JobName":"$(TARGET_CIRCLECI_BUILD_PR)", "PRNumber":"$(_TARGET_PR_NUMBER)", "TargetCommit":"$(_TARGET_COMMIT)", "PlanUrl":"$(System.CollectionUri)", "ProjectId":"$(System.TeamProjectId)", "HubName":"$(System.HostType)", "PlanId":"$(System.PlanId)", "JobId":"$(System.JobId)", "TimelineId":"$(System.TimelineId)", "TaskInstanceId":"$(System.TaskInstanceId)", "AuthToken":"$(System.AccessToken)"}'
    body: ''
    urlSuffix: 'api/JobStatus'
    waitForCompletion: true
92
.azure_pipelines/job_templates/wheel-wait-template.yml
Normal file
@@ -0,0 +1,92 @@
# Initiate 5 agentless-server waiting jobs to check on the
# status of PR artifact builds, for a maximum wait time of
# 11*60 min=660 mins. These jobs will pass immediately
# once targeted CircleCI build is ready.

jobs:
- job: checkjob1
  pool: server
  timeoutInMinutes: 60
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob2
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob1
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob3
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob2
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob4
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob3
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob5
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob4
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob6
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob5
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob7
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob6
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob8
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob7
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob9
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob8
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob10
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob9
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob11
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob10
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml
60
.azure_pipelines/nightly-pytorch-tests-pipeline.yml
Normal file
@@ -0,0 +1,60 @@
# PyTorch Nightly PyTorch Tests Builds Pipeline on Azure DevOps
#
# This pipeline runs custom PyTorch unit-tests on nightly
# PyTorch wheels.

stages:
- stage: 'NightlyCustomTests'
  displayName: 'Run custom unit tests on PyTorch wheels'
  jobs:
  - template: job_templates/pytorch-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: $(BUILD_POOL_LIN_1)
      customMatrixes:
        Nightly_Custom_Tests:
          _DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_1)
          _PYTHON_VERSION: $(PYTHON_VERSION_LIN_1)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_1)
          _RUN_TESTS: $(RUN_TESTS_LIN)

  - template: job_templates/pytorch-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: $(BUILD_POOL_LIN_2)
      customMatrixes:
        Nightly_Custom_Tests:
          _DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_2)
          _PYTHON_VERSION: $(PYTHON_VERSION_LIN_2)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_2)
          _RUN_TESTS: $(RUN_TESTS_LIN)

  - template: job_templates/pytorch-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: $(BUILD_POOL_WIN_1)
      customMatrixes:
        Nightly_Custom_Tests:
          _PYTHON_VERSION: $(PYTHON_VERSION_WIN_1)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_1)
          _RUN_TESTS: $(RUN_TESTS_WIN)

  - template: job_templates/pytorch-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: $(BUILD_POOL_WIN_2)
      customMatrixes:
        Nightly_Custom_Tests:
          _PYTHON_VERSION: $(PYTHON_VERSION_WIN_2)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_2)
          _RUN_TESTS: $(RUN_TESTS_WIN)

- stage: 'NotifyWebapp'
  displayName: 'Notify Webapp that pipeline is finished'
  dependsOn: NightlyCustomTests
  condition: succeededOrFailed()
  jobs:
  - template: job_templates/notify-webapp-template.yml
    parameters:
      name: ubuntu_1804_CPU
      pool: $(BUILD_POOL_LIN_1)
62
.azure_pipelines/pytorch-tests-pipeline.yml
Normal file
@@ -0,0 +1,62 @@
# PyTorch PR PyTorch Tests Builds Pipeline on Azure DevOps
#
# This pipeline:
#   1) ensures that CircleCI builds for a given PR
#      have finished, and that its artifacts are
#      ready for download
#   2) runs custom PyTorch unit-tests on PyTorch
#      wheels generated during PR builds.

resources:
  webhooks:
    - webhook: GitHubPyTorchPRTrigger
      connection: GitHubPyTorchPRTriggerConnection
      filters:
        - path: repositoryName
          value: pytorch_tests

stages:
- stage: 'EnsureArtifactsReady'
  displayName: 'Ensure PyTorch PR Artifacts are ready'
  jobs:
  - template: job_templates/wheel-wait-template.yml
  variables:
    _TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
    _TARGET_PR_NUMBER: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
    _TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}

- stage: 'PRCustomTests'
  displayName: 'Run custom unit tests on PyTorch wheels'
  dependsOn: EnsureArtifactsReady
  condition: succeeded()
  jobs:
  - template: job_templates/pytorch-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: $(BUILD_POOL_PR)
      customMatrixes:
        PR_Custom_Tests:
          _PYTHON_VERSION: $(PYTHON_VERSION_PR)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_PR)
          _TARGET_CIRCLECI_BUILD: $(TARGET_CIRCLECI_BUILD_PR)
          _TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
          _TARGET_PR_NUMBER: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
          _TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}
          _DOCKER_IMAGE: $(DOCKER_IMAGE_PR)
          _RUN_TESTS: $(RUN_TESTS_PR)

- stage: 'NotifyWebapp'
  displayName: 'Notify Webapp that pipeline is finished'
  dependsOn: PRCustomTests
  condition: succeededOrFailed()
  jobs:
  - template: job_templates/notify-webapp-template.yml
    parameters:
      name: ubuntu_1804_CPU
      pool: $(BUILD_POOL_LIN_1)
      customMatrixes:
        PR_Notify_WebApp:
          _TARGET_CIRCLECI_BUILD: $(TARGET_CIRCLECI_BUILD_PR)
          _TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
          _TARGET_PR_NUMBER: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
          _TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}
224
.azure_pipelines/verify-pipeline.yml
Normal file
@@ -0,0 +1,224 @@
# PyTorch Official Builds Pipeline on Azure DevOps
#
# This pipeline:
#   1) builds PyTorch on all available configurations
#   2) verifies PyTorch artifacts by installing them in a clean environment
#      and checking torch.__version__
#   3) publishes official PyTorch artifacts to Azure DevOps Artifacts for consumption

stages:
- stage: 'Build'
  displayName: 'Build PyTorch'
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_official_build: True
      os: ubuntu
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_official_build: True
      os: ubuntu
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      build_stage: True
      is_official_build: True
      os: windows
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      build_stage: True
      is_official_build: True
      os: windows
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101

- stage: 'Verify'
  displayName: 'Verify PyTorch wheels'
  dependsOn: Build
  condition: succeeded()
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101

- stage: 'Publish'
  displayName: 'Publish PyTorch wheels'
  dependsOn: Verify
  condition: succeeded()
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101
@@ -1,3 +0,0 @@
# We do not use this library in our Bazel build. It contains an
# infinitely recursing symlink that makes Bazel very unhappy.
third_party/ittapi/
108
.bazelrc
@@ -1,11 +1,10 @@
build --cxxopt=--std=c++17
build --copt=--std=c++14
build --copt=-I.
# Bazel does not support including its cc_library targets as system
# headers. We work around this for generated code
# (e.g. c10/macros/cmake_macros.h) by making the generated directory a
# system include path.
build --copt=-isystem --copt bazel-out/k8-fastbuild/bin
build --copt=-isystem --copt bazel-out/darwin-fastbuild/bin
build --experimental_ui_max_stdouterr_bytes=2048576

# Configuration to disable tty features for environments like CI
@ -13,102 +12,15 @@ build:no-tty --curses no
|
||||
build:no-tty --progress_report_interval 10
|
||||
build:no-tty --show_progress_rate_limit 10
|
||||
|
||||
# Build with GPU support by default.
build --define=cuda=true
# rules_cuda configuration
build --@rules_cuda//cuda:enable_cuda
build --@rules_cuda//cuda:cuda_targets=sm_52
build --@rules_cuda//cuda:compiler=nvcc
build --repo_env=CUDA_PATH=/usr/local/cuda

# Configuration to build without GPU support
build:cpu-only --define=cuda=false
# Configuration to build with GPU support
build:gpu --define=cuda=true
# define a separate build folder for faster switching between configs
build:cpu-only --platform_suffix=-cpu-only
build:gpu --platform_suffix=-gpu
# See the note on the config-less build for details about why we are
# doing this. We must also do it for the "-cpu-only" platform suffix.
build --copt=-isystem --copt=bazel-out/k8-fastbuild-cpu-only/bin
# doing this. We must also do it for the "-gpu" platform suffix.
build --copt=-isystem --copt=bazel-out/k8-fastbuild-gpu/bin
# rules_cuda configuration
build:cpu-only --@rules_cuda//cuda:enable_cuda=False

# Definition of --config=shell
# interactive shell immediately before execution
build:shell --run_under="//tools/bazel_tools:shellwrap"

# Disable all warnings for external repositories. We don't care about
# their warnings.
build --per_file_copt=^external/@-w

# Set additional warnings to error level.
#
# Implementation notes:
#  * we use file extensions to determine if we are using the C++
#    compiler or the cuda compiler
#  * we use ^// at the start of the regex to only permit matching
#    PyTorch files. This excludes external repos.
#
# Note that because this is logically a command-line flag, it is
# considered the word on what warnings are enabled. This has the
# unfortunate consequence of preventing us from disabling an error at
# the target level because those flags will come before these flags in
# the action invocation. Instead we provide per-file exceptions after
# this.
#
# On the bright side, this means we don't have to more broadly apply
# the exceptions to an entire target.
#
# Looking for CUDA flags? We have a cu_library macro that we can edit
# directly. Look in //tools/rules:cu.bzl for details. Editing the
# macro over this has the following advantages:
#  * making changes does not require discarding the Bazel analysis
#    cache
#  * it allows for selective overrides on individual targets since the
#    macro-level opts will come earlier than target level overrides

build --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=all
# The following warnings come from -Wall. We downgrade them from error
# to warnings here.
#
# We intentionally use #pragma unroll, which is compiler specific.
build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-error=unknown-pragmas

build --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=extra
# The following warnings come from -Wextra. We downgrade them from error
# to warnings here.
#
# unused-parameter-compare has a tremendous amount of violations in the
# codebase. It will be a lot of work to fix them, just disable it for
# now.
build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-parameter
# missing-field-parameters has both a large number of violations in
# the codebase, but it also is used pervasively in the Python C
# API. There are a couple of catches though:
#  * we use multiple versions of the Python API and hence have
#    potentially multiple different versions of each relevant
#    struct. They may have different numbers of fields. It will be
#    unwieldy to support multiple versions in the same source file.
#  * Python itself for many of these structs recommends only
#    initializing a subset of the fields. We should respect the API
#    usage conventions of our dependencies.
#
# Hence, we just disable this warning altogether. We may want to clean
# up some of the clear-cut cases that could be risky, but we still
# likely want to have this disabled for the most part.
build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-missing-field-initializers

build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-function
build --per_file_copt='^//.*\.(cpp|cc)$'@-Wno-unused-variable

build --per_file_copt='//:aten/src/ATen/RegisterCompositeExplicitAutograd\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterCompositeImplicitAutograd\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterMkldnnCPU\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterNestedTensorCPU\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterQuantizedCPU\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterSparseCPU\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterSparseCsrCPU\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterNestedTensorMeta\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterSparseMeta\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterQuantizedMeta\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:aten/src/ATen/RegisterZeroTensor\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:torch/csrc/lazy/generated/RegisterAutogradLazy\.cpp$'@-Wno-error=unused-function
build --per_file_copt='//:torch/csrc/lazy/generated/RegisterLazy\.cpp$'@-Wno-error=unused-function
build:gpu --@rules_cuda//cuda:enable_cuda
build:gpu --@rules_cuda//cuda:cuda_targets=sm_52
build:gpu --@rules_cuda//cuda:compiler=nvcc
build:gpu --repo_env=CUDA_PATH=/usr/local/cuda
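The configs above are selected per invocation on the Bazel command line. A minimal usage sketch, assuming a `//:torch` target and a `//:some_test` target exist in the workspace (both names are illustrative, not taken from this diff):

```bash
# CPU-only build, routed to the "-cpu-only" platform suffix defined above
bazel build --config=cpu-only //:torch

# GPU build (CUDA enabled via --define=cuda=true and the rules_cuda flags)
bazel build --config=gpu //:torch

# Drop into an interactive shell immediately before the test action runs
bazel test --config=shell //:some_test
```
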
@ -1 +1 @@
6.1.1
4.2.1

@ -1,26 +0,0 @@
[pt]
  is_oss=1

[buildfile]
  name = BUCK.oss
  includes = //tools/build_defs/select.bzl

[repositories]
  bazel_skylib = third_party/bazel-skylib/
  ovr_config = .

[download]
  in_build = true

[cxx]
  cxxflags = -std=c++17
  ldflags = -Wl,--no-undefined
  should_remap_host_platform = true
  cpp = /usr/bin/clang
  cc = /usr/bin/clang
  cxx = /usr/bin/clang++
  cxxpp = /usr/bin/clang++
  ld = /usr/bin/clang++

[project]
  default_flavors_mode=all

@ -1,36 +0,0 @@
set -ex

LOCAL_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
ROOT_DIR=$(cd "$LOCAL_DIR"/../.. && pwd)
TEST_DIR="$ROOT_DIR/test"
gtest_reports_dir="${TEST_DIR}/test-reports/cpp"
pytest_reports_dir="${TEST_DIR}/test-reports/python"

# Figure out which Python to use
PYTHON="$(which python)"
if [[ "${BUILD_ENVIRONMENT}" =~ py((2|3)\.?[0-9]?\.?[0-9]?) ]]; then
  PYTHON=$(which "python${BASH_REMATCH[1]}")
fi

if [[ "${BUILD_ENVIRONMENT}" == *rocm* ]]; then
  # HIP_PLATFORM is auto-detected by hipcc; unset to avoid build errors
  unset HIP_PLATFORM
  if which sccache > /dev/null; then
    # Save sccache logs to file
    sccache --stop-server || true
    rm -f ~/sccache_error.log || true
    SCCACHE_ERROR_LOG=~/sccache_error.log SCCACHE_IDLE_TIMEOUT=0 sccache --start-server

    # Report sccache stats for easier debugging
    sccache --zero-stats
  fi
fi

# /usr/local/caffe2 is where the cpp bits are installed to in cmake-only
# builds. In +python builds the cpp tests are copied to /usr/local/caffe2 so
# that the test code in .ci/test.sh is the same
INSTALL_PREFIX="/usr/local/caffe2"

mkdir -p "$gtest_reports_dir" || true
mkdir -p "$pytest_reports_dir" || true
mkdir -p "$INSTALL_PREFIX" || true

@ -1,172 +0,0 @@
#!/bin/bash

# shellcheck source=./common.sh
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

if [[ ${BUILD_ENVIRONMENT} == *onnx* ]]; then
  pip install click mock tabulate networkx==2.0
  pip -q install --user "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
fi

# Skip tests in environments where they are not built/applicable
if [[ "${BUILD_ENVIRONMENT}" == *-android* ]]; then
  echo 'Skipping tests'
  exit 0
fi
if [[ "${BUILD_ENVIRONMENT}" == *-rocm* ]]; then
  # temporary to locate some kernel issues on the CI nodes
  export HSAKMT_DEBUG_LEVEL=4
fi
# These additional packages are needed for circleci ROCm builds.
if [[ $BUILD_ENVIRONMENT == *rocm* ]]; then
  # Need networkx 2.0 because bellmand_ford was moved in 2.1 . Scikit-image by
  # defaults installs the most recent networkx version, so we install this lower
  # version explicitly before scikit-image pulls it in as a dependency
  pip install networkx==2.0
  # click - onnx
  pip install --progress-bar off click protobuf tabulate virtualenv mock typing-extensions
fi

# Find where cpp tests and Caffe2 itself are installed
if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then
  # For cmake only build we install everything into /usr/local
  cpp_test_dir="$INSTALL_PREFIX/cpp_test"
  ld_library_path="$INSTALL_PREFIX/lib"
else
  # For Python builds we install into python
  # cd to /usr first so the python import doesn't get confused by any 'caffe2'
  # directory in cwd
  python_installation="$(dirname $(dirname $(cd /usr && $PYTHON -c 'import os; import caffe2; print(os.path.realpath(caffe2.__file__))')))"
  caffe2_pypath="$python_installation/caffe2"
  cpp_test_dir="$python_installation/torch/test"
  ld_library_path="$python_installation/torch/lib"
fi

################################################################################
# C++ tests #
################################################################################
# Only run cpp tests in the first shard, don't run cpp tests a second time in the second shard
if [[ "${SHARD_NUMBER:-1}" == "1" ]]; then
  echo "Running C++ tests.."
  for test in $(find "$cpp_test_dir" -executable -type f); do
    case "$test" in
      # skip tests we know are hanging or bad
      */mkl_utils_test|*/aten/integer_divider_test)
        continue
        ;;
      */scalar_tensor_test|*/basic|*/native_test)
        if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
          continue
        else
          LD_LIBRARY_PATH="$ld_library_path" "$test"
        fi
        ;;
      */*_benchmark)
        LD_LIBRARY_PATH="$ld_library_path" "$test" --benchmark_color=false
        ;;
      *)
        # Currently, we use a mixture of gtest (caffe2) and Catch2 (ATen). While
        # planning to migrate to gtest as the common PyTorch c++ test suite, we
        # currently do NOT use the xml test reporter, because Catch doesn't
        # support multiple reporters
        # c.f. https://github.com/catchorg/Catch2/blob/master/docs/release-notes.md#223
        # which means that enabling XML output means you lose useful stdout
        # output for Jenkins. It's more important to have useful console
        # output than it is to have XML output for Jenkins.
        # Note: in the future, if we want to use xml test reporter once we switch
        # to all gtest, one can simply do:
        LD_LIBRARY_PATH="$ld_library_path" \
          "$test" --gtest_output=xml:"$gtest_reports_dir/$(basename $test).xml"
        ;;
    esac
  done
fi

################################################################################
# Python tests #
################################################################################
if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then
  exit 0
fi

# If pip is installed as root, we must use sudo.
# CircleCI docker images could install conda as jenkins user, or use the OS's python package.
PIP=$(which pip)
PIP_USER=$(stat --format '%U' $PIP)
CURRENT_USER=$(id -u -n)
if [[ "$PIP_USER" = root && "$CURRENT_USER" != root ]]; then
  MAYBE_SUDO=sudo
fi

# Uninstall pre-installed hypothesis and coverage to use an older version as newer
# versions remove the timeout parameter from settings which ideep/conv_transpose_test.py uses
$MAYBE_SUDO pip -q uninstall -y hypothesis
$MAYBE_SUDO pip -q uninstall -y coverage

# "pip install hypothesis==3.44.6" from official server is unreliable on
# CircleCI, so we host a copy on S3 instead
$MAYBE_SUDO pip -q install attrs==18.1.0 -f https://s3.amazonaws.com/ossci-linux/wheels/attrs-18.1.0-py2.py3-none-any.whl
$MAYBE_SUDO pip -q install coverage==4.5.1 -f https://s3.amazonaws.com/ossci-linux/wheels/coverage-4.5.1-cp36-cp36m-macosx_10_12_x86_64.whl
$MAYBE_SUDO pip -q install hypothesis==3.44.6 -f https://s3.amazonaws.com/ossci-linux/wheels/hypothesis-3.44.6-py3-none-any.whl

# Collect additional tests to run (outside caffe2/python)
EXTRA_TESTS=()

# CUDA builds always include NCCL support
if [[ "$BUILD_ENVIRONMENT" == *-cuda* ]] || [[ "$BUILD_ENVIRONMENT" == *-rocm* ]]; then
  EXTRA_TESTS+=("$caffe2_pypath/contrib/nccl")
fi

rocm_ignore_test=()
if [[ $BUILD_ENVIRONMENT == *-rocm* ]]; then
  # Currently these tests are failing on ROCM platform:

  # On ROCm, RCCL (distributed) development isn't complete.
  # https://github.com/ROCmSoftwarePlatform/rccl
  rocm_ignore_test+=("--ignore $caffe2_pypath/python/data_parallel_model_test.py")

  # This test has been flaky in ROCm CI (but note the tests are
  # cpu-only so should be unrelated to ROCm)
  rocm_ignore_test+=("--ignore $caffe2_pypath/python/operator_test/blobs_queue_db_test.py")
  # This test is skipped on Jenkins(compiled without MKL) and otherwise known flaky
  rocm_ignore_test+=("--ignore $caffe2_pypath/python/ideep/convfusion_op_test.py")
  # This test is skipped on Jenkins(compiled without MKL) and causing segfault on Circle
  rocm_ignore_test+=("--ignore $caffe2_pypath/python/ideep/pool_op_test.py")
fi

echo "Running Python tests.."
# locale setting is required by click package
for loc in "en_US.utf8" "C.UTF-8"; do
  if locale -a | grep "$loc" >/dev/null 2>&1; then
    export LC_ALL="$loc"
    export LANG="$loc"
    break;
  fi
done

# Some Caffe2 tests fail when run using AVX512 ISA, see https://github.com/pytorch/pytorch/issues/66111
export DNNL_MAX_CPU_ISA=AVX2

# Should still run even in the absence of SHARD_NUMBER
if [[ "${SHARD_NUMBER:-1}" == "1" ]]; then
  # TODO(sdym@meta.com) remove this when the linked issue resolved.
  # py is temporary until https://github.com/Teemu/pytest-sugar/issues/241 is fixed
  pip install --user py==1.11.0
  pip install --user pytest-sugar
  # NB: Warnings are disabled because they make it harder to see what
  # the actual erroring test is
  "$PYTHON" \
    -m pytest \
    -x \
    -v \
    --disable-warnings \
    --junit-xml="$pytest_reports_dir/result.xml" \
    --ignore "$caffe2_pypath/python/test/executor_test.py" \
    --ignore "$caffe2_pypath/python/operator_test/matmul_op_test.py" \
    --ignore "$caffe2_pypath/python/operator_test/pack_ops_test.py" \
    --ignore "$caffe2_pypath/python/mkl/mkl_sbn_speed_test.py" \
    --ignore "$caffe2_pypath/python/trt/test_pt_onnx_trt.py" \
    ${rocm_ignore_test[@]} \
    "$caffe2_pypath/python" \
    "${EXTRA_TESTS[@]}"
fi

@ -1,31 +0,0 @@
# Docker images for GitHub CI

This directory contains everything needed to build the Docker images
that are used in our CI.

The Dockerfiles located in subdirectories are parameterized to
conditionally run build stages depending on build arguments passed to
`docker build`. This lets us use only a few Dockerfiles for many
images. The different configurations are identified by a freeform
string that we call a _build environment_. This string is persisted in
each image as the `BUILD_ENVIRONMENT` environment variable.

See `build.sh` for valid build environments (it's the giant switch).

## Contents

* `build.sh` -- dispatch script to launch all builds
* `common` -- scripts used to execute individual Docker build stages
* `ubuntu` -- Dockerfile for Ubuntu image for CPU build and test jobs
* `ubuntu-cuda` -- Dockerfile for Ubuntu image with CUDA support for nvidia-docker
* `ubuntu-rocm` -- Dockerfile for Ubuntu image with ROCm support

## Usage

```bash
# Build a specific image
./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest

# Set flags (see build.sh) and build image
sudo bash -c 'PROTOBUF=1 ./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest'
```

@ -1,438 +0,0 @@
#!/bin/bash

set -ex

image="$1"
shift

if [ -z "${image}" ]; then
  echo "Usage: $0 IMAGE"
  exit 1
fi

function extract_version_from_image_name() {
  eval export $2=$(echo "${image}" | perl -n -e"/$1(\d+(\.\d+)?(\.\d+)?)/ && print \$1")
  if [ "x${!2}" = x ]; then
    echo "variable '$2' not correctly parsed from image='$image'"
    exit 1
  fi
}

function extract_all_from_image_name() {
  # parts $image into array, splitting on '-'
  keep_IFS="$IFS"
  IFS="-"
  declare -a parts=($image)
  IFS="$keep_IFS"
  unset keep_IFS

  for part in "${parts[@]}"; do
    name=$(echo "${part}" | perl -n -e"/([a-zA-Z]+)\d+(\.\d+)?(\.\d+)?/ && print \$1")
    vername="${name^^}_VERSION"
    # "py" is the odd one out, needs this special case
    if [ "x${name}" = xpy ]; then
      vername=ANACONDA_PYTHON_VERSION
    fi
    # skip non-conforming fields such as "pytorch", "linux" or "bionic" without version string
    if [ -n "${name}" ]; then
      extract_version_from_image_name "${name}" "${vername}"
    fi
  done
}

# Use the same pre-built XLA test image from PyTorch/XLA
if [[ "$image" == *xla* ]]; then
  echo "Using pre-built XLA test image..."
  exit 0
fi

if [[ "$image" == *-focal* ]]; then
  UBUNTU_VERSION=20.04
elif [[ "$image" == *-jammy* ]]; then
  UBUNTU_VERSION=22.04
elif [[ "$image" == *ubuntu* ]]; then
  extract_version_from_image_name ubuntu UBUNTU_VERSION
elif [[ "$image" == *centos* ]]; then
  extract_version_from_image_name centos CENTOS_VERSION
fi

if [ -n "${UBUNTU_VERSION}" ]; then
  OS="ubuntu"
elif [ -n "${CENTOS_VERSION}" ]; then
  OS="centos"
else
  echo "Unable to derive operating system base..."
  exit 1
fi

DOCKERFILE="${OS}/Dockerfile"
# When using ubuntu - 22.04, start from Ubuntu docker image, instead of nvidia/cuda docker image.
if [[ "$image" == *cuda* && "$UBUNTU_VERSION" != "22.04" ]]; then
  DOCKERFILE="${OS}-cuda/Dockerfile"
elif [[ "$image" == *rocm* ]]; then
  DOCKERFILE="${OS}-rocm/Dockerfile"
elif [[ "$image" == *cuda*linter* ]]; then
  # Use a separate Dockerfile for linter to keep a small image size
  DOCKERFILE="linter-cuda/Dockerfile"
elif [[ "$image" == *linter* ]]; then
  # Use a separate Dockerfile for linter to keep a small image size
  DOCKERFILE="linter/Dockerfile"
fi

# CMake 3.18 is needed to support CUDA17 language variant
CMAKE_VERSION=3.18.5

_UCX_COMMIT=00bcc6bb18fc282eb160623b4c0d300147f579af
_UCC_COMMIT=7cb07a76ccedad7e56ceb136b865eb9319c258ea

# It's annoying to rename jobs every time you want to rewrite a
# configuration, so we hardcode everything here rather than do it
# from scratch
case "$image" in
  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
    CUDA_VERSION=12.1.1
    CUDNN_VERSION=8
    ANACONDA_PYTHON_VERSION=3.10
    GCC_VERSION=9
    PROTOBUF=yes
    DB=yes
    VISION=yes
    KATEX=yes
    UCX_COMMIT=${_UCX_COMMIT}
    UCC_COMMIT=${_UCC_COMMIT}
    CONDA_CMAKE=yes
    TRITON=yes
    ;;
  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks)
    CUDA_VERSION=12.1.1
    CUDNN_VERSION=8
    ANACONDA_PYTHON_VERSION=3.10
    GCC_VERSION=9
    PROTOBUF=yes
    DB=yes
    VISION=yes
    KATEX=yes
    UCX_COMMIT=${_UCX_COMMIT}
    UCC_COMMIT=${_UCC_COMMIT}
    CONDA_CMAKE=yes
    TRITON=yes
    INDUCTOR_BENCHMARKS=yes
    ;;
  pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9)
    CUDA_VERSION=11.8.0
    CUDNN_VERSION=8
    ANACONDA_PYTHON_VERSION=3.10
    GCC_VERSION=9
    PROTOBUF=yes
    DB=yes
    VISION=yes
    KATEX=yes
    UCX_COMMIT=${_UCX_COMMIT}
    UCC_COMMIT=${_UCC_COMMIT}
    CONDA_CMAKE=yes
    TRITON=yes
    ;;
  pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9)
    CUDA_VERSION=12.1.1
    CUDNN_VERSION=8
    ANACONDA_PYTHON_VERSION=3.10
    GCC_VERSION=9
    PROTOBUF=yes
    DB=yes
    VISION=yes
    KATEX=yes
    UCX_COMMIT=${_UCX_COMMIT}
    UCC_COMMIT=${_UCC_COMMIT}
    CONDA_CMAKE=yes
    TRITON=yes
    ;;
  pytorch-linux-focal-py3-clang10-onnx)
    ANACONDA_PYTHON_VERSION=3.8
    CLANG_VERSION=10
    PROTOBUF=yes
    DB=yes
    VISION=yes
    CONDA_CMAKE=yes
    ONNX=yes
    ;;
  pytorch-linux-focal-py3-clang9-android-ndk-r21e)
    ANACONDA_PYTHON_VERSION=3.8
    CLANG_VERSION=9
    LLVMDEV=yes
    PROTOBUF=yes
    ANDROID=yes
    ANDROID_NDK_VERSION=r21e
    GRADLE_VERSION=6.8.3
    NINJA_VERSION=1.9.0
    ;;
  pytorch-linux-focal-py3.8-clang10)
    ANACONDA_PYTHON_VERSION=3.8
    CLANG_VERSION=10
    PROTOBUF=yes
    DB=yes
    VISION=yes
    VULKAN_SDK_VERSION=1.2.162.1
    SWIFTSHADER=yes
    CONDA_CMAKE=yes
    TRITON=yes
    ;;
  pytorch-linux-focal-py3.11-clang10)
    ANACONDA_PYTHON_VERSION=3.11
    CLANG_VERSION=10
    PROTOBUF=yes
    DB=yes
    VISION=yes
    VULKAN_SDK_VERSION=1.2.162.1
    SWIFTSHADER=yes
    CONDA_CMAKE=yes
    TRITON=yes
    ;;
  pytorch-linux-focal-py3.8-gcc9)
    ANACONDA_PYTHON_VERSION=3.8
    GCC_VERSION=9
    PROTOBUF=yes
    DB=yes
    VISION=yes
    CONDA_CMAKE=yes
    TRITON=yes
    ;;
  pytorch-linux-focal-rocm-n-1-py3)
    ANACONDA_PYTHON_VERSION=3.8
    GCC_VERSION=9
    PROTOBUF=yes
    DB=yes
    VISION=yes
    ROCM_VERSION=5.6
    NINJA_VERSION=1.9.0
    CONDA_CMAKE=yes
    TRITON=yes
    ;;
  pytorch-linux-focal-rocm-n-py3)
    ANACONDA_PYTHON_VERSION=3.8
    GCC_VERSION=9
    PROTOBUF=yes
    DB=yes
    VISION=yes
    ROCM_VERSION=5.7
    NINJA_VERSION=1.9.0
    CONDA_CMAKE=yes
    TRITON=yes
    ;;
  pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks)
    ANACONDA_PYTHON_VERSION=3.8
    GCC_VERSION=11
    PROTOBUF=yes
    DB=yes
    VISION=yes
    KATEX=yes
    CONDA_CMAKE=yes
    TRITON=yes
    DOCS=yes
    INDUCTOR_BENCHMARKS=yes
    ;;
  pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12)
    ANACONDA_PYTHON_VERSION=3.8
    CUDA_VERSION=11.8
    CUDNN_VERSION=8
    CLANG_VERSION=12
    PROTOBUF=yes
    DB=yes
    VISION=yes
    TRITON=yes
    ;;
  pytorch-linux-jammy-py3-clang12-asan)
    ANACONDA_PYTHON_VERSION=3.9
    CLANG_VERSION=12
    PROTOBUF=yes
    DB=yes
    VISION=yes
    CONDA_CMAKE=yes
    TRITON=yes
    ;;
  pytorch-linux-jammy-py3-clang15-asan)
    ANACONDA_PYTHON_VERSION=3.10
    CLANG_VERSION=15
    CONDA_CMAKE=yes
    VISION=yes
    ;;
  pytorch-linux-jammy-py3.8-gcc11)
    ANACONDA_PYTHON_VERSION=3.8
    GCC_VERSION=11
    PROTOBUF=yes
    DB=yes
    VISION=yes
    KATEX=yes
    CONDA_CMAKE=yes
    TRITON=yes
    DOCS=yes
    ;;
  pytorch-linux-jammy-py3-clang12-executorch)
    ANACONDA_PYTHON_VERSION=3.10
    CLANG_VERSION=12
    CONDA_CMAKE=yes
    EXECUTORCH=yes
    ;;
  pytorch-linux-focal-linter)
    # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
    # We will need to update mypy version eventually, but that's for another day. The task
    # would be to upgrade mypy to 1.0.0 with Python 3.11
    ANACONDA_PYTHON_VERSION=3.9
    CONDA_CMAKE=yes
    ;;
  pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter)
    ANACONDA_PYTHON_VERSION=3.9
    CUDA_VERSION=11.8
    CONDA_CMAKE=yes
    ;;
  *)
    # Catch-all for builds that are not hardcoded.
    PROTOBUF=yes
    DB=yes
    VISION=yes
    echo "image '$image' did not match an existing build configuration"
    if [[ "$image" == *py* ]]; then
      extract_version_from_image_name py ANACONDA_PYTHON_VERSION
    fi
    if [[ "$image" == *cuda* ]]; then
      extract_version_from_image_name cuda CUDA_VERSION
      extract_version_from_image_name cudnn CUDNN_VERSION
    fi
    if [[ "$image" == *rocm* ]]; then
      extract_version_from_image_name rocm ROCM_VERSION
      NINJA_VERSION=1.9.0
      TRITON=yes
      # To ensure that any ROCm config will build using conda cmake
      # and thus have LAPACK/MKL enabled
      CONDA_CMAKE=yes
    fi
    if [[ "$image" == *centos7* ]]; then
      NINJA_VERSION=1.10.2
    fi
    if [[ "$image" == *gcc* ]]; then
      extract_version_from_image_name gcc GCC_VERSION
    fi
    if [[ "$image" == *clang* ]]; then
      extract_version_from_image_name clang CLANG_VERSION
    fi
    if [[ "$image" == *devtoolset* ]]; then
      extract_version_from_image_name devtoolset DEVTOOLSET_VERSION
    fi
    if [[ "$image" == *glibc* ]]; then
      extract_version_from_image_name glibc GLIBC_VERSION
    fi
    if [[ "$image" == *cmake* ]]; then
      extract_version_from_image_name cmake CMAKE_VERSION
    fi
    ;;
esac

tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')

#when using cudnn version 8 install it separately from cuda
if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
  IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
  if [[ ${CUDNN_VERSION} == 8 ]]; then
    IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
  fi
fi

# Build image
docker build \
  --no-cache \
  --progress=plain \
  --build-arg "BUILD_ENVIRONMENT=${image}" \
  --build-arg "PROTOBUF=${PROTOBUF:-}" \
  --build-arg "LLVMDEV=${LLVMDEV:-}" \
  --build-arg "DB=${DB:-}" \
  --build-arg "VISION=${VISION:-}" \
  --build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \
  --build-arg "CENTOS_VERSION=${CENTOS_VERSION}" \
  --build-arg "DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" \
  --build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \
  --build-arg "CLANG_VERSION=${CLANG_VERSION}" \
  --build-arg "ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION}" \
  --build-arg "GCC_VERSION=${GCC_VERSION}" \
  --build-arg "CUDA_VERSION=${CUDA_VERSION}" \
  --build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \
  --build-arg "TENSORRT_VERSION=${TENSORRT_VERSION}" \
  --build-arg "ANDROID=${ANDROID}" \
  --build-arg "ANDROID_NDK=${ANDROID_NDK_VERSION}" \
  --build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \
  --build-arg "VULKAN_SDK_VERSION=${VULKAN_SDK_VERSION}" \
  --build-arg "SWIFTSHADER=${SWIFTSHADER}" \
  --build-arg "CMAKE_VERSION=${CMAKE_VERSION:-}" \
  --build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
  --build-arg "KATEX=${KATEX:-}" \
  --build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
  --build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx906;gfx90a}" \
  --build-arg "IMAGE_NAME=${IMAGE_NAME}" \
  --build-arg "UCX_COMMIT=${UCX_COMMIT}" \
  --build-arg "UCC_COMMIT=${UCC_COMMIT}" \
  --build-arg "CONDA_CMAKE=${CONDA_CMAKE}" \
  --build-arg "TRITON=${TRITON}" \
  --build-arg "ONNX=${ONNX}" \
  --build-arg "DOCS=${DOCS}" \
  --build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \
  --build-arg "EXECUTORCH=${EXECUTORCH}" \
  -f $(dirname ${DOCKERFILE})/Dockerfile \
  -t "$tmp_tag" \
  "$@" \
  .

# NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
# for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
# find the correct image. As a result, here we have to replace the
#   "$UBUNTU_VERSION" == "18.04-rc"
# with
#   "$UBUNTU_VERSION" == "18.04"
UBUNTU_VERSION=$(echo ${UBUNTU_VERSION} | sed 's/-rc$//')

function drun() {
  docker run --rm "$tmp_tag" $*
}

if [[ "$OS" == "ubuntu" ]]; then

  if !(drun lsb_release -a 2>&1 | grep -qF Ubuntu); then
    echo "OS=ubuntu, but:"
    drun lsb_release -a
    exit 1
  fi
  if !(drun lsb_release -a 2>&1 | grep -qF "$UBUNTU_VERSION"); then
    echo "UBUNTU_VERSION=$UBUNTU_VERSION, but:"
    drun lsb_release -a
    exit 1
  fi
fi

if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
  if !(drun python --version 2>&1 | grep -qF "Python $ANACONDA_PYTHON_VERSION"); then
    echo "ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION, but:"
    drun python --version
    exit 1
  fi
fi

if [ -n "$GCC_VERSION" ]; then
  if !(drun gcc --version 2>&1 | grep -q " $GCC_VERSION\\W"); then
    echo "GCC_VERSION=$GCC_VERSION, but:"
    drun gcc --version
    exit 1
  fi
fi

if [ -n "$CLANG_VERSION" ]; then
  if !(drun clang --version 2>&1 | grep -qF "clang version $CLANG_VERSION"); then
    echo "CLANG_VERSION=$CLANG_VERSION, but:"
    drun clang --version
    exit 1
  fi
fi

if [ -n "$KATEX" ]; then
  if !(drun katex --version); then
    echo "KATEX=$KATEX, but:"
    drun katex --version
    exit 1
  fi
fi

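For illustration, a hedged sketch of how an image name that is not in the hardcoded case list above would be handled (the image name and tag here are assumptions, not ones appearing in this diff):

```bash
# Falls through to the catch-all branch, which would parse CUDA_VERSION=11.8,
# CUDNN_VERSION=8, ANACONDA_PYTHON_VERSION=3.10 and GCC_VERSION=9 out of the
# name, pick ubuntu-cuda/Dockerfile (focal => UBUNTU_VERSION=20.04), and then
# sanity-check those versions inside the built image via drun.
./build.sh pytorch-linux-focal-cuda11.8-cudnn8-py3.10-gcc9-custom -t my-ci-image:latest
```
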
@ -1,123 +0,0 @@
ARG CENTOS_VERSION

FROM centos:${CENTOS_VERSION}

ARG CENTOS_VERSION

# Set AMD gpu targets to build for
ARG PYTORCH_ROCM_ARCH
ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}

# Install required packages to build Caffe2

# Install common dependencies (so that this step can be cached separately)
COPY ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Update CentOS git version
RUN yum -y remove git
RUN yum -y remove git-*
RUN yum -y install https://packages.endpoint.com/rhel/7/os/x86_64/endpoint-repo-1.9-1.x86_64.rpm || \
    (yum -y install https://packages.endpointdev.com/rhel/7/os/x86_64/endpoint-repo-1.9-1.x86_64.rpm && \
     sed -i "s/packages.endpoint/packages.endpointdev/" /etc/yum.repos.d/endpoint.repo)
RUN yum install -y git

# Install devtoolset
ARG DEVTOOLSET_VERSION
COPY ./common/install_devtoolset.sh install_devtoolset.sh
RUN bash ./install_devtoolset.sh && rm install_devtoolset.sh
ENV BASH_ENV "/etc/profile"

# (optional) Install non-default glibc version
ARG GLIBC_VERSION
COPY ./common/install_glibc.sh install_glibc.sh
RUN if [ -n "${GLIBC_VERSION}" ]; then bash ./install_glibc.sh; fi
RUN rm install_glibc.sh

# Install user
COPY ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install conda and other packages (e.g., numpy, pytest)
ARG ANACONDA_PYTHON_VERSION
ARG CONDA_CMAKE
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
COPY ./common/install_conda.sh install_conda.sh
COPY ./common/common_utils.sh common_utils.sh
RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt

# (optional) Install protobuf for ONNX
ARG PROTOBUF
COPY ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}

# (optional) Install database packages like LMDB and LevelDB
ARG DB
COPY ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
ENV INSTALLED_VISION ${VISION}

# Install rocm
ARG ROCM_VERSION
COPY ./common/install_rocm.sh install_rocm.sh
RUN bash ./install_rocm.sh
RUN rm install_rocm.sh
COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
RUN bash ./install_rocm_magma.sh
RUN rm install_rocm_magma.sh
ENV PATH /opt/rocm/bin:$PATH
ENV PATH /opt/rocm/hcc/bin:$PATH
ENV PATH /opt/rocm/hip/bin:$PATH
ENV PATH /opt/rocm/opencl/bin:$PATH
ENV PATH /opt/rocm/llvm/bin:$PATH
ENV MAGMA_HOME /opt/rocm/magma
ENV LANG en_US.utf8
ENV LC_ALL en_US.utf8

# (optional) Install non-default CMake version
ARG CMAKE_VERSION
COPY ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh

# (optional) Install non-default Ninja version
ARG NINJA_VERSION
COPY ./common/install_ninja.sh install_ninja.sh
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
RUN rm install_ninja.sh

ARG TRITON
# Install triton, this needs to be done before sccache because the latter will
# try to reach out to S3, which docker build runners don't have access
ENV CMAKE_C_COMPILER cc
ENV CMAKE_CXX_COMPILER c++
COPY ./common/install_triton.sh install_triton.sh
COPY ./common/common_utils.sh common_utils.sh
COPY ci_commit_pins/triton-rocm.txt triton-rocm.txt
COPY triton_version.txt triton_version.txt
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
RUN rm install_triton.sh common_utils.sh triton-rocm.txt triton_version.txt

# Install ccache/sccache (do this last, so we get priority in PATH)
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
RUN bash ./install_cache.sh && rm install_cache.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

USER jenkins
CMD ["bash"]

@ -1 +0,0 @@
ca6322dcfc51b209a06b76d160bd95d81d58f15c
@ -1 +0,0 @@
6c26faa159b79a42d7fa46cb66e2d21523351987
@ -1 +0,0 @@
730b907b4d45a4713cbc425cbf224c46089fd514
@ -1 +0,0 @@
dafe1459823b9549417ed95e9720f1b594fab329
@ -1 +0,0 @@
bcad9dabe15021c53b6a88296e9d7a210044f108
@ -1,18 +0,0 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

# Cache the test models at ~/.cache/torch/hub/
IMPORT_SCRIPT_FILENAME="/tmp/torchvision_import_script.py"
as_jenkins echo 'import torchvision; torchvision.models.mobilenet_v2(pretrained=True); torchvision.models.mobilenet_v3_large(pretrained=True);' > "${IMPORT_SCRIPT_FILENAME}"

pip_install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/cpu
# Very weird quoting behavior here https://github.com/conda/conda/issues/10972,
# so echo the command to a file and run the file instead
conda_run python "${IMPORT_SCRIPT_FILENAME}"

# Cleaning up
conda_run pip uninstall -y torch torchvision
rm "${IMPORT_SCRIPT_FILENAME}" || true

@ -1,36 +0,0 @@
#!/bin/bash

# Work around bug where devtoolset replaces sudo and breaks it.
if [ -n "$DEVTOOLSET_VERSION" ]; then
  export SUDO=/bin/sudo
else
  export SUDO=sudo
fi

as_jenkins() {
  # NB: unsetting the environment variables works around a conda bug
  # https://github.com/conda/conda/issues/6576
  # NB: Pass on PATH and LD_LIBRARY_PATH to sudo invocation
  # NB: This must be run from a directory that jenkins has access to,
  # works around https://github.com/conda/conda-package-handling/pull/34
  $SUDO -E -H -u jenkins env -u SUDO_UID -u SUDO_GID -u SUDO_COMMAND -u SUDO_USER env "PATH=$PATH" "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" $*
}

conda_install() {
  # Ensure that the install command don't upgrade/downgrade Python
  # This should be called as
  #   conda_install pkg1 pkg2 ... [-c channel]
  as_jenkins conda install -q -n py_$ANACONDA_PYTHON_VERSION -y python="$ANACONDA_PYTHON_VERSION" $*
}

conda_run() {
  as_jenkins conda run -n py_$ANACONDA_PYTHON_VERSION --no-capture-output $*
}

pip_install() {
  as_jenkins conda run -n py_$ANACONDA_PYTHON_VERSION pip install --progress-bar off $*
}

get_pinned_commit() {
  cat "${1}".txt
}

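These helpers are sourced by the install scripts that follow. A minimal usage sketch; the package names and pin file are illustrative assumptions, not taken from this diff:

```bash
# Source the helpers, then run everything as the jenkins user inside the
# py_$ANACONDA_PYTHON_VERSION conda environment.
source ./common_utils.sh

conda_install ninja cmake            # conda install that keeps the env's Python pinned
pip_install numpy==1.24.2            # pip install inside the same conda env
COMMIT=$(get_pinned_commit triton)   # reads triton.txt from the current directory
```
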
@ -1,158 +0,0 @@
#!/bin/bash

set -ex

install_ubuntu() {
  # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
  # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
  # find the correct image. As a result, here we have to check for
  #   "$UBUNTU_VERSION" == "18.04"*
  # instead of
  #   "$UBUNTU_VERSION" == "18.04"
  if [[ "$UBUNTU_VERSION" == "20.04"* ]]; then
    cmake3="cmake=3.16*"
    maybe_libiomp_dev=""
  elif [[ "$UBUNTU_VERSION" == "22.04"* ]]; then
    cmake3="cmake=3.22*"
    maybe_libiomp_dev=""
  else
    cmake3="cmake=3.5*"
    maybe_libiomp_dev="libiomp-dev"
  fi

  if [[ "$CLANG_VERSION" == 15 ]]; then
    maybe_libomp_dev="libomp-15-dev"
  elif [[ "$CLANG_VERSION" == 12 ]]; then
    maybe_libomp_dev="libomp-12-dev"
  elif [[ "$CLANG_VERSION" == 10 ]]; then
    maybe_libomp_dev="libomp-10-dev"
  else
    maybe_libomp_dev=""
  fi

  # HACK: UCC testing relies on libnccl library from NVIDIA repo, and version 2.16 crashes
  # See https://github.com/pytorch/pytorch/pull/105260#issuecomment-1673399729
  if [[ "$UBUNTU_VERSION" == "20.04"* && "$CUDA_VERSION" == "11.8"* ]]; then
    maybe_libnccl_dev="libnccl2=2.15.5-1+cuda11.8 libnccl-dev=2.15.5-1+cuda11.8 --allow-downgrades --allow-change-held-packages"
  else
    maybe_libnccl_dev=""
  fi

  # Install common dependencies
  apt-get update
  # TODO: Some of these may not be necessary
  ccache_deps="asciidoc docbook-xml docbook-xsl xsltproc"
  deploy_deps="libffi-dev libbz2-dev libreadline-dev libncurses5-dev libncursesw5-dev libgdbm-dev libsqlite3-dev uuid-dev tk-dev"
  numpy_deps="gfortran"
  apt-get install -y --no-install-recommends \
    $ccache_deps \
    $numpy_deps \
    ${deploy_deps} \
    ${cmake3} \
    apt-transport-https \
    autoconf \
    automake \
    build-essential \
    ca-certificates \
    curl \
    git \
    libatlas-base-dev \
    libc6-dbg \
    ${maybe_libiomp_dev} \
    libyaml-dev \
    libz-dev \
    libjpeg-dev \
    libasound2-dev \
    libsndfile-dev \
    ${maybe_libomp_dev} \
    ${maybe_libnccl_dev} \
    software-properties-common \
    wget \
    sudo \
    vim \
    jq \
    libtool \
    vim \
    unzip \
    gdb

  # Should resolve issues related to various apt package repository cert issues
  # see: https://github.com/pytorch/pytorch/issues/65931
  apt-get install -y libgnutls30

  # Cleanup package manager
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {
  # Need EPEL for many packages we depend on.
  # See http://fedoraproject.org/wiki/EPEL
  yum --enablerepo=extras install -y epel-release

  ccache_deps="asciidoc docbook-dtds docbook-style-xsl libxslt"
  numpy_deps="gcc-gfortran"
  # Note: protobuf-c-{compiler,devel} on CentOS are too old to be used
  # for Caffe2. That said, we still install them to make sure the build
  # system opts to build/use protoc and libprotobuf from third-party.
  yum install -y \
    $ccache_deps \
    $numpy_deps \
    autoconf \
    automake \
    bzip2 \
    cmake \
    cmake3 \
    curl \
    gcc \
    gcc-c++ \
    gflags-devel \
    git \
    glibc-devel \
    glibc-headers \
    glog-devel \
    hiredis-devel \
    libstdc++-devel \
    libsndfile-devel \
    make \
    opencv-devel \
    sudo \
    wget \
    vim \
    unzip \
    gdb

  # Cleanup
  yum clean all
  rm -rf /var/cache/yum
  rm -rf /var/lib/yum/yumdb
  rm -rf /var/lib/yum/history
}

# Install base packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
  ubuntu)
    install_ubuntu
    ;;
  centos)
    install_centos
    ;;
  *)
    echo "Unable to determine OS..."
    exit 1
    ;;
esac

# Install Valgrind separately since the apt-get version is too old.
mkdir valgrind_build && cd valgrind_build
VALGRIND_VERSION=3.20.0
wget https://ossci-linux.s3.amazonaws.com/valgrind-${VALGRIND_VERSION}.tar.bz2
tar -xjf valgrind-${VALGRIND_VERSION}.tar.bz2
cd valgrind-${VALGRIND_VERSION}
./configure --prefix=/usr/local
make -j6
sudo make install
cd ../../
rm -rf valgrind_build
alias valgrind="/usr/local/bin/valgrind"

@ -1,44 +0,0 @@
#!/bin/bash

set -ex

if [ -n "$CLANG_VERSION" ]; then

  if [[ $CLANG_VERSION == 9 && $UBUNTU_VERSION == 18.04 ]]; then
    sudo apt-get update
    # gpg-agent is not available by default on 18.04
    sudo apt-get install -y --no-install-recommends gpg-agent
    wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
    apt-add-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-${CLANG_VERSION} main"
  elif [[ $UBUNTU_VERSION == 22.04 ]]; then
    # work around ubuntu apt-get conflicts
    sudo apt-get -y -f install
  fi

  sudo apt-get update
  apt-get install -y --no-install-recommends clang-"$CLANG_VERSION"
  apt-get install -y --no-install-recommends llvm-"$CLANG_VERSION"

  # Install dev version of LLVM.
  if [ -n "$LLVMDEV" ]; then
    sudo apt-get install -y --no-install-recommends llvm-"$CLANG_VERSION"-dev
  fi

  # Use update-alternatives to make this version the default
  update-alternatives --install /usr/bin/clang clang /usr/bin/clang-"$CLANG_VERSION" 50
  update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-"$CLANG_VERSION" 50
  # Override cc/c++ to clang as well
  update-alternatives --install /usr/bin/cc cc /usr/bin/clang 50
  update-alternatives --install /usr/bin/c++ c++ /usr/bin/clang++ 50

  # clang's packaging is a little messed up (the runtime libs aren't
  # added into the linker path), so give it a little help
  clang_lib=("/usr/lib/llvm-$CLANG_VERSION/lib/clang/"*"/lib/linux")
  echo "$clang_lib" > /etc/ld.so.conf.d/clang.conf
  ldconfig

  # Cleanup package manager
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

fi

@ -1,31 +0,0 @@
#!/bin/bash

set -ex

[ -n "$CMAKE_VERSION" ]

# Remove system cmake install so it won't get used instead
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
  ubuntu)
    apt-get remove cmake -y
    ;;
  centos)
    yum remove cmake -y
    ;;
  *)
    echo "Unable to determine OS..."
    exit 1
    ;;
esac

# Turn 3.6.3 into v3.6
path=$(echo "${CMAKE_VERSION}" | sed -e 's/\([0-9].[0-9]\+\).*/v\1/')
file="cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz"

# Download and install specific CMake version in /usr/local
pushd /tmp
curl -Os --retry 3 "https://cmake.org/files/${path}/${file}"
tar -C /usr/local --strip-components 1 --no-same-owner -zxf cmake-*.tar.gz
rm -f cmake-*.tar.gz
popd

@ -1,102 +0,0 @@
#!/bin/bash

set -ex

# Optionally install conda
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
  BASE_URL="https://repo.anaconda.com/miniconda"

  MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1)
  MINOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 2)

  case "$MAJOR_PYTHON_VERSION" in
    2)
      CONDA_FILE="Miniconda2-latest-Linux-x86_64.sh"
      ;;
    3)
      CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
      ;;
    *)
      echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION"
      exit 1
      ;;
  esac

  mkdir -p /opt/conda
  chown jenkins:jenkins /opt/conda

  source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

  pushd /tmp
  wget -q "${BASE_URL}/${CONDA_FILE}"
  # NB: Manually invoke bash per https://github.com/conda/conda/issues/10431
  as_jenkins bash "${CONDA_FILE}" -b -f -p "/opt/conda"
  popd

  # NB: Don't do this, rely on the rpath to get it right
  #echo "/opt/conda/lib" > /etc/ld.so.conf.d/conda-python.conf
  #ldconfig
  sed -e 's|PATH="\(.*\)"|PATH="/opt/conda/bin:\1"|g' -i /etc/environment
  export PATH="/opt/conda/bin:$PATH"

  # Ensure we run conda in a directory that jenkins has write access to
  pushd /opt/conda

  # Prevent conda from updating to 4.14.0, which causes docker build failures
  # See https://hud.pytorch.org/pytorch/pytorch/commit/754d7f05b6841e555cea5a4b2c505dd9e0baec1d
  # Uncomment the below when resolved to track the latest conda update
  # as_jenkins conda update -y -n base conda

  # Install correct Python version
  as_jenkins conda create -n py_$ANACONDA_PYTHON_VERSION -y python="$ANACONDA_PYTHON_VERSION"

  # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
  CONDA_COMMON_DEPS="astunparse pyyaml mkl=2021.4.0 mkl-include=2021.4.0 setuptools"
  if [ "$ANACONDA_PYTHON_VERSION" = "3.11" ]; then
    conda_install numpy=1.23.5 ${CONDA_COMMON_DEPS}
  else
    conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS}
  fi

  # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
  # and libpython-static for torch deploy
  conda_install llvmdev=8.0.0 "libpython-static=${ANACONDA_PYTHON_VERSION}"

  # Use conda cmake in some cases. Conda cmake will be newer than our supported
  # min version (3.5 for xenial and 3.10 for bionic), so we only do it in those
  # following builds that we know should use conda. Specifically, Ubuntu bionic
  # and focal cannot find conda mkl with stock cmake, so we need a cmake from conda
  if [ -n "${CONDA_CMAKE}" ]; then
    conda_install cmake
  fi

  # Magma package names are concatenation of CUDA major and minor ignoring revision
  # I.e. magma-cuda102 package corresponds to CUDA_VERSION=10.2 and CUDA_VERSION=10.2.89
  if [ -n "$CUDA_VERSION" ]; then
    conda_install magma-cuda$(TMP=${CUDA_VERSION/./};echo ${TMP%.*[0-9]}) -c pytorch
  fi

  # Install some other packages, including those needed for Python test reporting
  pip_install -r /opt/conda/requirements-ci.txt

  pip_install -U scikit-learn

  if [ -n "$DOCS" ]; then
    apt-get update
    apt-get -y install expect-dev

    # We are currently building docs with python 3.8 (min support version)
    pip_install -r /opt/conda/requirements-docs.txt
  fi

  # HACK HACK HACK
  # gcc-9 for ubuntu-18.04 from http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu
  # Pulls llibstdc++6 13.1.0-8ubuntu1~18.04 which is too new for conda
  # So remove libstdc++6.so.3.29 installed by https://anaconda.org/anaconda/libstdcxx-ng/files?version=11.2.0
  # Same is true for gcc-12 from Ubuntu-22.04
  if grep -e [12][82].04.[623] /etc/issue >/dev/null; then
    rm /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/lib/libstdc++.so.6
  fi

  popd
fi

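For reference, a short sketch of how the magma package name above is derived from CUDA_VERSION (the version value here is illustrative):

```bash
CUDA_VERSION=11.8.0
TMP=${CUDA_VERSION/./}            # only the first dot is replaced -> "118.0"
echo "magma-cuda${TMP%.*[0-9]}"   # the patch suffix is stripped -> "magma-cuda118"
```
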
@ -1,27 +0,0 @@
#!/bin/bash

if [[ ${CUDNN_VERSION} == 8 ]]; then
  # cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
  mkdir tmp_cudnn && cd tmp_cudnn
  CUDNN_NAME="cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive"
  if [[ ${CUDA_VERSION:0:4} == "12.1" ]]; then
    CUDNN_NAME="cudnn-linux-x86_64-8.9.2.26_cuda12-archive"
    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/${CUDNN_NAME}.tar.xz
  elif [[ ${CUDA_VERSION:0:4} == "11.8" ]]; then
    CUDNN_NAME="cudnn-linux-x86_64-8.7.0.84_cuda11-archive"
    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.7.0/local_installers/11.8/${CUDNN_NAME}.tar.xz
  else
    curl --retry 3 -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/${CUDNN_NAME}.tar.xz
  fi

  tar xf ${CUDNN_NAME}.tar.xz
  cp -a ${CUDNN_NAME}/include/* /usr/include/
  cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/
  cp -a ${CUDNN_NAME}/include/* /usr/include/x86_64-linux-gnu/

  cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/
  cp -a ${CUDNN_NAME}/lib/* /usr/lib/x86_64-linux-gnu/
  cd ..
  rm -rf tmp_cudnn
  ldconfig
fi

@ -1,25 +0,0 @@
#!/bin/bash

set -ex

if [ -n "$KATEX" ]; then
  apt-get update
  # Ignore error if gpg-agent doesn't exist (for Ubuntu 16.04)
  apt-get install -y gpg-agent || :

  curl --retry 3 -sL https://deb.nodesource.com/setup_16.x | sudo -E bash -
  sudo apt-get install -y nodejs

  curl --retry 3 -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
  echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list

  apt-get update
  apt-get install -y --no-install-recommends yarn
  yarn global add katex --prefix /usr/local

  sudo apt-get -y install doxygen

  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

fi

@ -1,62 +0,0 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

clone_executorch() {
  EXECUTORCH_PINNED_COMMIT=$(get_pinned_commit executorch)

  # Clone the Executorch
  git clone https://github.com/pytorch/executorch.git

  # and fetch the target commit
  pushd executorch
  git checkout "${EXECUTORCH_PINNED_COMMIT}"
  git submodule update --init
  popd

  chown -R jenkins executorch
}

install_buck2() {
  pushd executorch/.ci/docker

  BUCK2_VERSION=$(cat ci_commit_pins/buck2.txt)
  source common/install_buck.sh

  popd
}

install_conda_dependencies() {
  pushd executorch/.ci/docker
  # Install conda dependencies like flatbuffer
  conda_install --file conda-env-ci.txt
  popd
}

install_pip_dependencies() {
  pushd executorch/.ci/docker
  # Install all Python dependencies
  pip_install -r requirements-ci.txt
  popd
}

setup_executorch() {
  pushd executorch
  source .ci/scripts/utils.sh

  install_flatc_from_source
  pip_install .
  build_executorch_runner "cmake"

  # Make sure that all the newly generate files are owned by Jenkins
  chown -R jenkins .
  popd
}

clone_executorch
install_buck2
install_conda_dependencies
install_pip_dependencies
setup_executorch

@ -1,20 +0,0 @@
#!/bin/bash

set -ex

if [ -n "$GCC_VERSION" ]; then

  # Need the official toolchain repo to get alternate packages
  add-apt-repository ppa:ubuntu-toolchain-r/test
  apt-get update
  apt-get install -y g++-$GCC_VERSION
  update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50
  update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50
  update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50

  # Cleanup package manager
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

fi

@ -1,26 +0,0 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

function install_huggingface() {
  local version
  commit=$(get_pinned_commit huggingface)
  pip_install pandas==2.0.3
  pip_install "git+https://github.com/huggingface/transformers@${commit}"
}

function install_timm() {
  local commit
  commit=$(get_pinned_commit timm)
  pip_install pandas==2.0.3
  pip_install "git+https://github.com/huggingface/pytorch-image-models@${commit}"
  # Clean up
  conda_run pip uninstall -y cmake torch torchvision triton
}

# Pango is needed for weasyprint which is needed for doctr
conda_install pango
install_huggingface
install_timm

@ -1,29 +0,0 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

if [ -n "${UBUNTU_VERSION}" ]; then
  apt update
  apt-get install -y clang doxygen git graphviz nodejs npm libtinfo5
fi

# Do shallow clone of PyTorch so that we can init lintrunner in Docker build context
git clone https://github.com/pytorch/pytorch.git --depth 1
chown -R jenkins pytorch

pushd pytorch
# Install all linter dependencies
pip_install -r requirements.txt
conda_run lintrunner init

# Cache .lintbin directory as part of the Docker image
cp -r .lintbin /tmp
popd

# Node dependencies required by toc linter job
npm install -g markdown-toc

# Cleaning up
rm -rf pytorch

@ -1,50 +0,0 @@
#!/bin/bash

set -ex

source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"

retry () {
  "$@" || (sleep 10 && "$@") || (sleep 20 && "$@") || (sleep 40 && "$@")
}

# A bunch of custom pip dependencies for ONNX
pip_install \
  beartype==0.15.0 \
  filelock==3.9.0 \
  flatbuffers==2.0 \
  mock==5.0.1 \
  ninja==1.10.2 \
  networkx==2.0 \
  numpy==1.24.2

# ONNXRuntime should be installed before installing
# onnx-weekly. Otherwise, onnx-weekly could be
# overwritten by onnx.
pip_install \
  parameterized==0.8.1 \
  pytest-cov==4.0.0 \
  pytest-subtests==0.10.0 \
  tabulate==0.9.0 \
  transformers==4.32.1

pip_install coloredlogs packaging
retry pip_install -i https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ --no-cache-dir --no-input ort-nightly==1.17.0.dev20231005006

pip_install -i https://test.pypi.org/simple/ onnx==1.15.0rc2
pip_install onnxscript==0.1.0.dev20231128 --no-deps

# Cache the transformers model to be used later by ONNX tests. We need to run the transformers
# package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
IMPORT_SCRIPT_FILENAME="/tmp/onnx_import_script.py"
as_jenkins echo 'import transformers; transformers.AutoModel.from_pretrained("sshleifer/tiny-gpt2"); transformers.AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2");' > "${IMPORT_SCRIPT_FILENAME}"

# Need a PyTorch version for transformers to work
pip_install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
# Very weird quoting behavior here https://github.com/conda/conda/issues/10972,
# so echo the command to a file and run the file instead
conda_run python "${IMPORT_SCRIPT_FILENAME}"

# Cleaning up
conda_run pip uninstall -y torch
rm "${IMPORT_SCRIPT_FILENAME}" || true

@ -1,174 +0,0 @@
#!/bin/bash

set -ex

ver() {
  printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
}

# Map ROCm version to AMDGPU version
declare -A AMDGPU_VERSIONS=( ["5.0"]="21.50" ["5.1.1"]="22.10.1" ["5.2"]="22.20" )

install_ubuntu() {
  apt-get update
  if [[ $UBUNTU_VERSION == 18.04 ]]; then
    # gpg-agent is not available by default on 18.04
    apt-get install -y --no-install-recommends gpg-agent
  fi
  if [[ $UBUNTU_VERSION == 20.04 ]]; then
    # gpg-agent is not available by default on 20.04
    apt-get install -y --no-install-recommends gpg-agent
  fi
  apt-get install -y kmod
  apt-get install -y wget

  # Need the libc++1 and libc++abi1 libraries to allow torch._C to load at runtime
  apt-get install -y libc++1
  apt-get install -y libc++abi1

  if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then
    # Add amdgpu repository
    UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
    local amdgpu_baseurl
    if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then
      amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/ubuntu"
    else
      amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/ubuntu"
    fi
    echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
  fi

  ROCM_REPO="ubuntu"
  if [[ $(ver $ROCM_VERSION) -lt $(ver 4.2) ]]; then
    ROCM_REPO="xenial"
  fi

  if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then
    ROCM_REPO="${UBUNTU_VERSION_NAME}"
  fi

  # Add rocm repository
  wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -
  local rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
  echo "deb [arch=amd64] ${rocm_baseurl} ${ROCM_REPO} main" > /etc/apt/sources.list.d/rocm.list
  apt-get update --allow-insecure-repositories

  DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
    rocm-dev \
    rocm-utils \
    rocm-libs \
    rccl \
    rocprofiler-dev \
    roctracer-dev

  # precompiled miopen kernels added in ROCm 3.5, renamed in ROCm 5.5
  # search for all unversioned packages
  # if search fails it will abort this script; use true to avoid case where search fails
  if [[ $(ver $ROCM_VERSION) -ge $(ver 5.5) ]]; then
    MIOPENHIPGFX=$(apt-cache search --names-only miopen-hip-gfx | awk '{print $1}' | grep -F -v . || true)
    if [[ "x${MIOPENHIPGFX}" = x ]]; then
      echo "miopen-hip-gfx package not available" && exit 1
    else
      DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENHIPGFX}
    fi
  else
    MIOPENKERNELS=$(apt-cache search --names-only miopenkernels | awk '{print $1}' | grep -F -v . || true)
    if [[ "x${MIOPENKERNELS}" = x ]]; then
      echo "miopenkernels package not available" && exit 1
    else
      DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENKERNELS}
    fi
  fi

  # Cleanup
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {

  yum update -y
  yum install -y kmod
  yum install -y wget
  yum install -y openblas-devel

  yum install -y epel-release
  yum install -y dkms kernel-headers-`uname -r` kernel-devel-`uname -r`

  if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then
    # Add amdgpu repository
    local amdgpu_baseurl
    if [[ $OS_VERSION == 9 ]]; then
      amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/9.0/main/x86_64"
    else
      if [[ $(ver $ROCM_VERSION) -ge $(ver 5.3) ]]; then
        amdgpu_baseurl="https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/7.9/main/x86_64"
      else
        amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/7.9/main/x86_64"
      fi
    fi
    echo "[AMDGPU]" > /etc/yum.repos.d/amdgpu.repo
    echo "name=AMDGPU" >> /etc/yum.repos.d/amdgpu.repo
    echo "baseurl=${amdgpu_baseurl}" >> /etc/yum.repos.d/amdgpu.repo
    echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo
    echo "gpgcheck=1" >> /etc/yum.repos.d/amdgpu.repo
|
||||
echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/amdgpu.repo
|
||||
fi
|
||||
|
||||
local rocm_baseurl="http://repo.radeon.com/rocm/yum/${ROCM_VERSION}"
|
||||
echo "[ROCm]" > /etc/yum.repos.d/rocm.repo
|
||||
echo "name=ROCm" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "baseurl=${rocm_baseurl}" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "enabled=1" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "gpgcheck=1" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/rocm.repo
|
||||
|
||||
yum update -y
|
||||
|
||||
yum install -y \
|
||||
rocm-dev \
|
||||
rocm-utils \
|
||||
rocm-libs \
|
||||
rccl \
|
||||
rocprofiler-dev \
|
||||
roctracer-dev
|
||||
|
||||
# precompiled miopen kernels; search for all unversioned packages
|
||||
# if search fails it will abort this script; use true to avoid case where search fails
|
||||
if [[ $(ver $ROCM_VERSION) -ge $(ver 5.5) ]]; then
|
||||
MIOPENHIPGFX=$(yum -q search miopen-hip-gfx | grep miopen-hip-gfx | awk '{print $1}'| grep -F kdb. || true)
|
||||
if [[ "x${MIOPENHIPGFX}" = x ]]; then
|
||||
echo "miopen-hip-gfx package not available" && exit 1
|
||||
else
|
||||
yum install -y ${MIOPENHIPGFX}
|
||||
fi
|
||||
else
|
||||
MIOPENKERNELS=$(yum -q search miopenkernels | grep miopenkernels- | awk '{print $1}'| grep -F kdb. || true)
|
||||
if [[ "x${MIOPENKERNELS}" = x ]]; then
|
||||
echo "miopenkernels package not available" && exit 1
|
||||
else
|
||||
yum install -y ${MIOPENKERNELS}
|
||||
fi
|
||||
fi
|
||||
|
||||
# Cleanup
|
||||
yum clean all
|
||||
rm -rf /var/cache/yum
|
||||
rm -rf /var/lib/yum/yumdb
|
||||
rm -rf /var/lib/yum/history
|
||||
}
|
||||
|
||||
# Install Python packages depending on the base OS
|
||||
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
|
||||
case "$ID" in
|
||||
ubuntu)
|
||||
install_ubuntu
|
||||
;;
|
||||
centos)
|
||||
install_centos
|
||||
;;
|
||||
*)
|
||||
echo "Unable to determine OS..."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
@ -1,31 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
# "install" hipMAGMA into /opt/rocm/magma by copying after build
|
||||
git clone https://bitbucket.org/icl/magma.git
|
||||
pushd magma
|
||||
|
||||
# Version 2.7.2 + ROCm related updates
|
||||
git checkout 823531632140d0edcb7e77c3edc0e837421471c5
|
||||
|
||||
cp make.inc-examples/make.inc.hip-gcc-mkl make.inc
|
||||
echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc
|
||||
echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc
|
||||
echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc
|
||||
export PATH="${PATH}:/opt/rocm/bin"
|
||||
if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then
|
||||
amdgpu_targets=`echo $PYTORCH_ROCM_ARCH | sed 's/;/ /g'`
|
||||
else
|
||||
amdgpu_targets=`rocm_agent_enumerator | grep -v gfx000 | sort -u | xargs`
|
||||
fi
|
||||
for arch in $amdgpu_targets; do
|
||||
echo "DEVCCFLAGS += --offload-arch=$arch" >> make.inc
|
||||
done
|
||||
# hipcc with the openmp flag may cause isnan() on __device__ not to be found; depending on context, the compiler may attempt to match it with the host definition
|
||||
sed -i 's/^FOPENMP/#FOPENMP/g' make.inc
|
||||
make -f make.gen.hipMAGMA -j $(nproc)
|
||||
LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION
|
||||
make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda/envs/py_$ANACONDA_PYTHON_VERSION
|
||||
popd
|
||||
mv magma /opt/rocm
|
||||
@ -1,68 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
|
||||
|
||||
get_conda_version() {
|
||||
as_jenkins conda list -n py_$ANACONDA_PYTHON_VERSION | grep -w $* | head -n 1 | awk '{print $2}'
|
||||
}
|
||||
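# e.g. "get_conda_version cmake" prints the version column of the cmake entry from "conda list"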
|
||||
conda_reinstall() {
|
||||
as_jenkins conda install -q -n py_$ANACONDA_PYTHON_VERSION -y --force-reinstall $*
|
||||
}
|
||||
|
||||
if [ -n "${ROCM_VERSION}" ]; then
|
||||
TRITON_REPO="https://github.com/ROCmSoftwarePlatform/triton"
|
||||
TRITON_TEXT_FILE="triton-rocm"
|
||||
else
|
||||
TRITON_REPO="https://github.com/openai/triton"
|
||||
TRITON_TEXT_FILE="triton"
|
||||
fi
|
||||
|
||||
# The logic here is copied from .ci/pytorch/common_utils.sh
|
||||
TRITON_PINNED_COMMIT=$(get_pinned_commit ${TRITON_TEXT_FILE})
|
||||
|
||||
if [ -n "${UBUNTU_VERSION}" ];then
|
||||
apt update
|
||||
apt-get install -y gpg-agent
|
||||
fi
|
||||
|
||||
if [ -n "${CONDA_CMAKE}" ]; then
|
||||
# Keep the current cmake and numpy version here, so we can reinstall them later
|
||||
CMAKE_VERSION=$(get_conda_version cmake)
|
||||
NUMPY_VERSION=$(get_conda_version numpy)
|
||||
fi
|
||||
|
||||
if [ -z "${MAX_JOBS}" ]; then
|
||||
export MAX_JOBS=$(nproc)
|
||||
fi
|
||||
|
||||
if [ -n "${UBUNTU_VERSION}" ] && [ -n "${GCC_VERSION}" ] && [[ "${GCC_VERSION}" == "7" ]]; then
|
||||
# Triton needs at least gcc-9 to build
|
||||
apt-get install -y g++-9
|
||||
|
||||
CXX=g++-9 pip_install "git+${TRITON_REPO}@${TRITON_PINNED_COMMIT}#subdirectory=python"
|
||||
elif [ -n "${UBUNTU_VERSION}" ] && [ -n "${CLANG_VERSION}" ]; then
|
||||
# Triton needs <filesystem> which surprisingly is not available with clang-9 toolchain
|
||||
add-apt-repository -y ppa:ubuntu-toolchain-r/test
|
||||
apt-get install -y g++-9
|
||||
|
||||
CXX=g++-9 pip_install "git+${TRITON_REPO}@${TRITON_PINNED_COMMIT}#subdirectory=python"
|
||||
else
|
||||
pip_install "git+${TRITON_REPO}@${TRITON_PINNED_COMMIT}#subdirectory=python"
|
||||
fi
|
||||
|
||||
if [ -n "${CONDA_CMAKE}" ]; then
|
||||
# TODO: This is to make sure that the same cmake and numpy versions from the install_conda
|
||||
# script are used. Without this step, the newer cmake version (3.25.2) downloaded by
|
||||
# triton build step via pip will fail to detect conda MKL. Once that issue is fixed,
|
||||
# this can be removed.
|
||||
#
|
||||
# The correct numpy version also needs to be set here because conda claims that it
|
||||
# causes an inconsistent environment. Without this, conda will attempt to install the
|
||||
# latest numpy version, which fails ASAN tests with the following import error: Numba
|
||||
# needs NumPy 1.20 or less.
|
||||
conda_reinstall cmake="${CMAKE_VERSION}"
|
||||
conda_reinstall numpy="${NUMPY_VERSION}"
|
||||
fi
|
||||
@ -1,48 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
if [[ -d "/usr/local/cuda/" ]]; then
|
||||
with_cuda=/usr/local/cuda/
|
||||
else
|
||||
with_cuda=no
|
||||
fi
|
||||
|
||||
function install_ucx() {
|
||||
set -ex
|
||||
git clone --recursive https://github.com/openucx/ucx.git
|
||||
pushd ucx
|
||||
git checkout ${UCX_COMMIT}
|
||||
git submodule update --init --recursive
|
||||
|
||||
./autogen.sh
|
||||
./configure --prefix=$UCX_HOME \
|
||||
--enable-mt \
|
||||
--with-cuda=$with_cuda \
|
||||
--enable-profiling \
|
||||
--enable-stats
|
||||
time make -j
|
||||
sudo make install
|
||||
|
||||
popd
|
||||
rm -rf ucx
|
||||
}
|
||||
|
||||
function install_ucc() {
|
||||
set -ex
|
||||
git clone --recursive https://github.com/openucx/ucc.git
|
||||
pushd ucc
|
||||
git checkout ${UCC_COMMIT}
|
||||
git submodule update --init --recursive
|
||||
|
||||
./autogen.sh
|
||||
./configure --prefix=$UCC_HOME --with-ucx=$UCX_HOME --with-cuda=$with_cuda
|
||||
time make -j
|
||||
sudo make install
|
||||
|
||||
popd
|
||||
rm -rf ucc
|
||||
}
|
||||
|
||||
install_ucx
|
||||
install_ucc
|
||||
@ -1,33 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
# Mirror jenkins user in container
|
||||
# the jenkins user should have the same user-id as ec2-user
|
||||
echo "jenkins:x:1000:1000::/var/lib/jenkins:" >> /etc/passwd
|
||||
echo "jenkins:x:1000:" >> /etc/group
|
||||
# Needed on focal or newer
|
||||
echo "jenkins:*:19110:0:99999:7:::" >>/etc/shadow
|
||||
|
||||
# Create $HOME
|
||||
mkdir -p /var/lib/jenkins
|
||||
chown jenkins:jenkins /var/lib/jenkins
|
||||
mkdir -p /var/lib/jenkins/.ccache
|
||||
chown jenkins:jenkins /var/lib/jenkins/.ccache
|
||||
|
||||
# Allow writing to /usr/local (for make install)
|
||||
chown jenkins:jenkins /usr/local
|
||||
|
||||
# Allow sudo
|
||||
# TODO: Maybe we shouldn't
|
||||
echo 'jenkins ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/jenkins
|
||||
|
||||
# Work around bug where devtoolset replaces sudo and breaks it.
|
||||
if [ -n "$DEVTOOLSET_VERSION" ]; then
|
||||
SUDO=/bin/sudo
|
||||
else
|
||||
SUDO=sudo
|
||||
fi
|
||||
|
||||
# Test that sudo works
|
||||
$SUDO -u jenkins $SUDO -v
|
||||
@ -1,44 +0,0 @@
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
FROM ubuntu:${UBUNTU_VERSION}
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Install missing libomp-dev
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libomp-dev && apt-get autoclean && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ARG CONDA_CMAKE
|
||||
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
|
||||
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install cuda and cudnn
|
||||
ARG CUDA_VERSION
|
||||
RUN wget -q https://raw.githubusercontent.com/pytorch/builder/main/common/install_cuda.sh -O install_cuda.sh
|
||||
RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh
|
||||
ENV DESIRED_CUDA ${CUDA_VERSION}
|
||||
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH
|
||||
|
||||
# Note that Docker build forbids copying file outside the build context
|
||||
COPY ./common/install_linter.sh install_linter.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
RUN bash ./install_linter.sh
|
||||
RUN rm install_linter.sh common_utils.sh
|
||||
|
||||
USER jenkins
|
||||
CMD ["bash"]
|
||||
@ -1,34 +0,0 @@
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
FROM ubuntu:${UBUNTU_VERSION}
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ARG CONDA_CMAKE
|
||||
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
|
||||
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt
|
||||
|
||||
# Note that Docker build forbids copying file outside the build context
|
||||
COPY ./common/install_linter.sh install_linter.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
RUN bash ./install_linter.sh
|
||||
RUN rm install_linter.sh common_utils.sh
|
||||
|
||||
USER jenkins
|
||||
CMD ["bash"]
|
||||
@ -1,300 +0,0 @@
|
||||
# Python dependencies required for unit tests
|
||||
|
||||
#awscli==1.6 #this breaks some platforms
|
||||
#Description: AWS command line interface
|
||||
#Pinned versions: 1.6
|
||||
#test that import:
|
||||
|
||||
boto3==1.19.12
|
||||
#Description: AWS SDK for python
|
||||
#Pinned versions: 1.19.12, 1.16.34
|
||||
#test that import:
|
||||
|
||||
click
|
||||
#Description: Command Line Interface Creation Kit
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
coremltools==5.0b5
|
||||
#Description: Apple framework for ML integration
|
||||
#Pinned versions: 5.0b5
|
||||
#test that import:
|
||||
|
||||
#dataclasses #this breaks some platforms
|
||||
#Description: Provides decorators for auto adding special methods to user classes
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
expecttest==0.1.6
|
||||
#Description: method for writing tests where test framework auto populates
|
||||
# the expected output based on previous runs
|
||||
#Pinned versions: 0.1.6
|
||||
#test that import:
|
||||
|
||||
flatbuffers==2.0
|
||||
#Description: cross platform serialization library
|
||||
#Pinned versions: 2.0
|
||||
#test that import:
|
||||
|
||||
hypothesis==5.35.1
|
||||
# Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
|
||||
#Description: advanced library for generating parametrized tests
|
||||
#Pinned versions: 3.44.6, 4.53.2
|
||||
#test that import: test_xnnpack_integration.py, test_pruning_op.py, test_nn.py
|
||||
|
||||
junitparser==2.1.1
|
||||
#Description: junitparser handles JUnit/xUnit Result XML files
|
||||
#Pinned versions: 2.1.1
|
||||
#test that import:
|
||||
|
||||
librosa>=0.6.2 ; python_version < "3.11"
|
||||
#Description: A python package for music and audio analysis
|
||||
#Pinned versions: >=0.6.2
|
||||
#test that import: test_spectral_ops.py
|
||||
|
||||
#mkl #this breaks linux-bionic-rocm4.5-py3.7
|
||||
#Description: Intel oneAPI Math Kernel Library
|
||||
#Pinned versions:
|
||||
#test that import: test_profiler.py, test_public_bindings.py, test_testing.py,
|
||||
#test_nn.py, test_mkldnn.py, test_jit.py, test_fx_experimental.py,
|
||||
#test_autograd.py
|
||||
|
||||
#mkl-devel
|
||||
# see mkl
|
||||
|
||||
#mock
|
||||
#Description: A testing library that allows you to replace parts of your
|
||||
#system under test with mock objects
|
||||
#Pinned versions:
|
||||
#test that import: test_module_init.py, test_modules.py, test_nn.py,
|
||||
#test_testing.py
|
||||
|
||||
#MonkeyType # breaks pytorch-xla-linux-bionic-py3.7-clang8
|
||||
#Description: collects runtime types of function arguments and return
|
||||
#values, and can automatically generate stub files
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
mypy==1.7.0
|
||||
# Pin MyPy version because new errors are likely to appear with each release
|
||||
#Description: linter
|
||||
#Pinned versions: 1.7.0
|
||||
#test that import: test_typing.py, test_type_hints.py
|
||||
|
||||
networkx==2.8.8
|
||||
#Description: creation, manipulation, and study of
|
||||
#the structure, dynamics, and functions of complex networks
|
||||
#Pinned versions: 2.8.8
|
||||
#test that import: functorch
|
||||
|
||||
#ninja
|
||||
#Description: build system. Note that installing it from
|
||||
#here breaks things, so it is commented out
|
||||
#Pinned versions: 1.10.0.post1
|
||||
#test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py
|
||||
|
||||
numba==0.49.0 ; python_version < "3.9"
|
||||
numba==0.54.1 ; python_version == "3.9"
|
||||
numba==0.55.2 ; python_version == "3.10"
|
||||
#Description: Just-In-Time Compiler for Numerical Functions
|
||||
#Pinned versions: 0.54.1, 0.49.0, <=0.49.1
|
||||
#test that import: test_numba_integration.py
|
||||
#For numba issue see https://github.com/pytorch/pytorch/issues/51511
|
||||
|
||||
#numpy
|
||||
#Description: Provides N-dimensional arrays and linear algebra
|
||||
#Pinned versions: 1.20
|
||||
#test that import: test_view_ops.py, test_unary_ufuncs.py, test_type_promotion.py,
|
||||
#test_type_info.py, test_torch.py, test_tensorexpr_pybind.py, test_tensorexpr.py,
|
||||
#test_tensorboard.py, test_tensor_creation_ops.py, test_static_runtime.py,
|
||||
#test_spectral_ops.py, test_sort_and_select.py, test_shape_ops.py,
|
||||
#test_segment_reductions.py, test_reductions.py, test_pruning_op.py,
|
||||
#test_overrides.py, test_numpy_interop.py, test_numba_integration.py
|
||||
#test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py,
|
||||
#test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py,
|
||||
#test_binary_ufuncs.py
|
||||
|
||||
#onnxruntime
|
||||
#Description: scoring engine for Open Neural Network Exchange (ONNX) models
|
||||
#Pinned versions: 1.9.0
|
||||
#test that import:
|
||||
|
||||
opt-einsum==3.3
|
||||
#Description: Python library to optimize tensor contraction order, used in einsum
|
||||
#Pinned versions: 3.3
|
||||
#test that import: test_linalg.py
|
||||
|
||||
optree==0.9.1
|
||||
#Description: A library for tree manipulation
|
||||
#Pinned versions: 0.9.1
|
||||
#test that import: test_vmap.py, test_aotdispatch.py, test_dynamic_shapes.py,
|
||||
#test_pytree.py, test_ops.py, test_control_flow.py, test_modules.py,
|
||||
#common_utils.py, test_eager_transforms.py, test_python_dispatch.py,
|
||||
#test_expanded_weights.py, test_decomp.py, test_overrides.py, test_masked.py,
|
||||
#test_ops.py, test_prims.py, test_subclass.py, test_functionalization.py,
|
||||
#test_schema_check.py, test_profiler_tree.py, test_meta.py, test_torchxla_num_output.py,
|
||||
#test_utils.py, test_proxy_tensor.py, test_memory_profiler.py, test_view_ops.py,
|
||||
#test_pointwise_ops.py, test_dtensor_ops.py, test_torchinductor.py, test_fx.py,
|
||||
#test_fake_tensor.py, test_mps.py
|
||||
|
||||
pillow==10.0.1
|
||||
#Description: Python Imaging Library fork
|
||||
#Pinned versions: 10.0.1
|
||||
#test that import:
|
||||
|
||||
protobuf==3.20.2
|
||||
#Description: Google’s data interchange format
|
||||
#Pinned versions: 3.20.1
|
||||
#test that import: test_tensorboard.py
|
||||
|
||||
psutil
|
||||
#Description: information on running processes and system utilization
|
||||
#Pinned versions:
|
||||
#test that import: test_profiler.py, test_openmp.py, test_dataloader.py
|
||||
|
||||
pytest==7.3.2
|
||||
#Description: testing framework
|
||||
#Pinned versions:
|
||||
#test that import: test_typing.py, test_cpp_extensions_aot.py, run_test.py
|
||||
|
||||
pytest-xdist==3.3.1
|
||||
#Description: plugin for running pytest in parallel
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
pytest-shard==0.1.2
|
||||
#Description: plugin for splitting up tests in pytest
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
pytest-flakefinder==1.1.0
|
||||
#Description: plugin for rerunning tests a fixed number of times in pytest
|
||||
#Pinned versions: 1.1.0
|
||||
#test that import:
|
||||
|
||||
pytest-rerunfailures>=10.3
|
||||
#Description: plugin for rerunning failure tests in pytest
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#pytest-benchmark
|
||||
#Description: fixture for benchmarking code
|
||||
#Pinned versions: 3.2.3
|
||||
#test that import:
|
||||
|
||||
#pytest-sugar
|
||||
#Description: shows failures and errors instantly
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
xdoctest==1.1.0
|
||||
#Description: runs doctests in pytest
|
||||
#Pinned versions: 1.1.0
|
||||
#test that import:
|
||||
|
||||
pygments==2.15.0
|
||||
#Description: supports doctest highlighting
|
||||
#Pinned versions: 2.12.0
|
||||
#test that import: the doctests
|
||||
|
||||
#PyYAML
|
||||
#Description: data serialization format
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#requests
|
||||
#Description: HTTP library
|
||||
#Pinned versions:
|
||||
#test that import: test_type_promotion.py
|
||||
|
||||
#rich
|
||||
#Description: rich text and beautiful formatting in the terminal
|
||||
#Pinned versions: 10.9.0
|
||||
#test that import:
|
||||
|
||||
scikit-image==0.19.3 ; python_version < "3.10"
|
||||
scikit-image==0.20.0 ; python_version >= "3.10"
|
||||
#Description: image processing routines
|
||||
#Pinned versions:
|
||||
#test that import: test_nn.py
|
||||
|
||||
#scikit-learn
|
||||
#Description: machine learning package
|
||||
#Pinned versions: 0.20.3
|
||||
#test that import:
|
||||
|
||||
scipy==1.6.3 ; python_version < "3.10"
|
||||
scipy==1.8.1 ; python_version == "3.10"
|
||||
scipy==1.10.1 ; python_version == "3.11"
|
||||
# Pin SciPy because of failing distribution tests (see #60347)
|
||||
#Description: scientific python
|
||||
#Pinned versions: 1.6.3
|
||||
#test that import: test_unary_ufuncs.py, test_torch.py,test_tensor_creation_ops.py
|
||||
#test_spectral_ops.py, test_sparse_csr.py, test_reductions.py,test_nn.py
|
||||
#test_linalg.py, test_binary_ufuncs.py
|
||||
|
||||
#tabulate
|
||||
#Description: Pretty-print tabular data
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
tb-nightly==2.13.0a20230426
|
||||
#Description: TensorBoard
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#typing-extensions
|
||||
#Description: type hints for python
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#virtualenv
|
||||
#Description: virtual environment for python
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
unittest-xml-reporting<=3.2.0,>=2.0.0
|
||||
#Description: saves unit test results to xml
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
lintrunner==0.10.7
|
||||
#Description: all about linters!
|
||||
#Pinned versions: 0.10.7
|
||||
#test that import:
|
||||
|
||||
rockset==1.0.3
|
||||
#Description: queries Rockset
|
||||
#Pinned versions: 1.0.3
|
||||
#test that import:
|
||||
|
||||
ghstack==0.7.1
|
||||
#Description: ghstack tool
|
||||
#Pinned versions: 0.7.1
|
||||
#test that import:
|
||||
|
||||
jinja2==3.1.2
|
||||
#Description: jinja2 template engine
|
||||
#Pinned versions: 3.1.2
|
||||
#test that import:
|
||||
|
||||
pytest-cpp==2.3.0
|
||||
#Description: This is used by pytest to invoke C++ tests
|
||||
#Pinned versions: 2.3.0
|
||||
#test that import:
|
||||
|
||||
z3-solver==4.12.2.0
|
||||
#Description: The Z3 Theorem Prover Project
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
tensorboard==2.13.0
|
||||
#Description: Also included in .ci/docker/requirements-docs.txt
|
||||
#Pinned versions:
|
||||
#test that import: test_tensorboard
|
||||
|
||||
pywavelets==1.4.1
|
||||
#Description: This is a requirement of scikit-image; we need to pin
|
||||
# it here because 1.5.0 conflicts with numpy 1.21.2 used in CI
|
||||
#Pinned versions: 1.4.1
|
||||
#test that import:
|
||||
@ -1,49 +0,0 @@
|
||||
sphinx==5.3.0
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 5.3.0
|
||||
-e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
|
||||
|
||||
# TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
|
||||
# but it doesn't seem to work and hangs around idly. The initial suspicion is that it is
|
||||
# related to the Docker setup; we can investigate this later
|
||||
sphinxcontrib.katex==0.8.6
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 0.8.6
|
||||
|
||||
matplotlib==3.5.3
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 3.5.3
|
||||
|
||||
tensorboard==2.13.0
|
||||
#Description: This is used to generate PyTorch docs
|
||||
#Pinned versions: 2.13.0
|
||||
|
||||
breathe==4.34.0
|
||||
#Description: This is used to generate PyTorch C++ docs
|
||||
#Pinned versions: 4.34.0
|
||||
|
||||
exhale==0.2.3
|
||||
#Description: This is used to generate PyTorch C++ docs
|
||||
#Pinned versions: 0.2.3
|
||||
|
||||
docutils==0.16
|
||||
#Description: This is used to generate PyTorch C++ docs
|
||||
#Pinned versions: 0.16
|
||||
|
||||
bs4==0.0.1
|
||||
#Description: This is used to generate PyTorch C++ docs
|
||||
#Pinned versions: 0.0.1
|
||||
|
||||
IPython==8.12.0
|
||||
#Description: This is used to generate PyTorch functorch docs
|
||||
#Pinned versions: 8.12.0
|
||||
|
||||
myst-nb==0.17.2
|
||||
#Description: This is used to generate PyTorch functorch docs
|
||||
#Pinned versions: 0.13.2
|
||||
|
||||
# The following are required to build torch.distributed.elastic.rendezvous.etcd* docs
|
||||
python-etcd==0.4.5
|
||||
sphinx-copybutton==0.5.0
|
||||
sphinx-panels==0.4.1
|
||||
myst-parser==0.18.1
|
||||
@ -1 +0,0 @@
|
||||
2.1.0
|
||||
@ -1,151 +0,0 @@
|
||||
ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
ARG IMAGE_NAME
|
||||
|
||||
FROM ${IMAGE_NAME}
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install katex
|
||||
ARG KATEX
|
||||
COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
|
||||
RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
|
||||
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
|
||||
ARG CONDA_CMAKE
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
COPY ./common/install_gcc.sh install_gcc.sh
|
||||
RUN bash ./install_gcc.sh && rm install_gcc.sh
|
||||
|
||||
# Install clang
|
||||
ARG CLANG_VERSION
|
||||
COPY ./common/install_clang.sh install_clang.sh
|
||||
RUN bash ./install_clang.sh && rm install_clang.sh
|
||||
|
||||
# (optional) Install protobuf for ONNX
|
||||
ARG PROTOBUF
|
||||
COPY ./common/install_protobuf.sh install_protobuf.sh
|
||||
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
|
||||
RUN rm install_protobuf.sh
|
||||
ENV INSTALLED_PROTOBUF ${PROTOBUF}
|
||||
|
||||
# (optional) Install database packages like LMDB and LevelDB
|
||||
ARG DB
|
||||
COPY ./common/install_db.sh install_db.sh
|
||||
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
|
||||
RUN rm install_db.sh
|
||||
ENV INSTALLED_DB ${DB}
|
||||
|
||||
# (optional) Install vision packages like OpenCV and ffmpeg
|
||||
ARG VISION
|
||||
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
|
||||
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
|
||||
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
|
||||
ENV INSTALLED_VISION ${VISION}
|
||||
|
||||
# (optional) Install UCC
|
||||
ARG UCX_COMMIT
|
||||
ARG UCC_COMMIT
|
||||
ENV UCX_COMMIT $UCX_COMMIT
|
||||
ENV UCC_COMMIT $UCC_COMMIT
|
||||
ENV UCX_HOME /usr
|
||||
ENV UCC_HOME /usr
|
||||
ADD ./common/install_ucc.sh install_ucc.sh
|
||||
RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi
|
||||
RUN rm install_ucc.sh
|
||||
|
||||
COPY ./common/install_openssl.sh install_openssl.sh
|
||||
ENV OPENSSL_ROOT_DIR /opt/openssl
|
||||
RUN bash ./install_openssl.sh
|
||||
ENV OPENSSL_DIR /opt/openssl
|
||||
|
||||
ARG INDUCTOR_BENCHMARKS
|
||||
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/huggingface.txt huggingface.txt
|
||||
COPY ci_commit_pins/timm.txt timm.txt
|
||||
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
|
||||
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
|
||||
|
||||
# (optional) Install non-default CMake version
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./common/install_cmake.sh install_cmake.sh
|
||||
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
|
||||
RUN rm install_cmake.sh
|
||||
|
||||
ARG TRITON
|
||||
# Install triton, this needs to be done before sccache because the latter will
|
||||
# try to reach out to S3, which docker build runners don't have access to
|
||||
COPY ./common/install_triton.sh install_triton.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/triton.txt triton.txt
|
||||
COPY triton_version.txt triton_version.txt
|
||||
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
|
||||
RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt
|
||||
|
||||
# Install ccache/sccache (do this last, so we get priority in PATH)
|
||||
COPY ./common/install_cache.sh install_cache.sh
|
||||
ENV PATH /opt/cache/bin:$PATH
|
||||
# See https://github.com/pytorch/pytorch/issues/82174
|
||||
# TODO(sdym@fb.com):
|
||||
# check if this is still needed after the migration off Xenial is complete
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI true
|
||||
RUN bash ./install_cache.sh && rm install_cache.sh
|
||||
ENV CMAKE_CUDA_COMPILER_LAUNCHER=/opt/cache/bin/sccache
|
||||
|
||||
# Add jni.h for java host build
|
||||
COPY ./common/install_jni.sh install_jni.sh
|
||||
COPY ./java/jni.h jni.h
|
||||
RUN bash ./install_jni.sh && rm install_jni.sh
|
||||
|
||||
# Install Open MPI for CUDA
|
||||
COPY ./common/install_openmpi.sh install_openmpi.sh
|
||||
RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi
|
||||
RUN rm install_openmpi.sh
|
||||
|
||||
# Include BUILD_ENVIRONMENT environment variable in image
|
||||
ARG BUILD_ENVIRONMENT
|
||||
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
|
||||
|
||||
# AWS specific CUDA build guidance
|
||||
ENV TORCH_CUDA_ARCH_LIST Maxwell
|
||||
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
|
||||
ENV CUDA_PATH /usr/local/cuda
|
||||
|
||||
# Install LLVM dev version (Defined in the pytorch/builder github repository)
|
||||
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
|
||||
|
||||
# Install CUDNN
|
||||
ARG CUDNN_VERSION
|
||||
ARG CUDA_VERSION
|
||||
COPY ./common/install_cudnn.sh install_cudnn.sh
|
||||
RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi
|
||||
RUN rm install_cudnn.sh
|
||||
|
||||
# Delete /usr/local/cuda-11.X/cuda-11.X symlinks
|
||||
RUN if [ -h /usr/local/cuda-11.6/cuda-11.6 ]; then rm /usr/local/cuda-11.6/cuda-11.6; fi
|
||||
RUN if [ -h /usr/local/cuda-11.7/cuda-11.7 ]; then rm /usr/local/cuda-11.7/cuda-11.7; fi
|
||||
RUN if [ -h /usr/local/cuda-12.1/cuda-12.1 ]; then rm /usr/local/cuda-12.1/cuda-12.1; fi
|
||||
|
||||
USER jenkins
|
||||
CMD ["bash"]
|
||||
@ -1,113 +0,0 @@
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
FROM ubuntu:${UBUNTU_VERSION}
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
# Set AMD gpu targets to build for
|
||||
ARG PYTORCH_ROCM_ARCH
|
||||
ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Install clang
|
||||
ARG LLVMDEV
|
||||
ARG CLANG_VERSION
|
||||
COPY ./common/install_clang.sh install_clang.sh
|
||||
RUN bash ./install_clang.sh && rm install_clang.sh
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ARG CONDA_CMAKE
|
||||
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
|
||||
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
|
||||
COPY requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
COPY ./common/install_gcc.sh install_gcc.sh
|
||||
RUN bash ./install_gcc.sh && rm install_gcc.sh
|
||||
|
||||
# (optional) Install protobuf for ONNX
|
||||
ARG PROTOBUF
|
||||
COPY ./common/install_protobuf.sh install_protobuf.sh
|
||||
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
|
||||
RUN rm install_protobuf.sh
|
||||
ENV INSTALLED_PROTOBUF ${PROTOBUF}
|
||||
|
||||
# (optional) Install database packages like LMDB and LevelDB
|
||||
ARG DB
|
||||
COPY ./common/install_db.sh install_db.sh
|
||||
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
|
||||
RUN rm install_db.sh
|
||||
ENV INSTALLED_DB ${DB}
|
||||
|
||||
# (optional) Install vision packages like OpenCV and ffmpeg
|
||||
ARG VISION
|
||||
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
|
||||
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
|
||||
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
|
||||
ENV INSTALLED_VISION ${VISION}
|
||||
|
||||
# Install rocm
|
||||
ARG ROCM_VERSION
|
||||
COPY ./common/install_rocm.sh install_rocm.sh
|
||||
RUN bash ./install_rocm.sh
|
||||
RUN rm install_rocm.sh
|
||||
COPY ./common/install_rocm_magma.sh install_rocm_magma.sh
|
||||
RUN bash ./install_rocm_magma.sh
|
||||
RUN rm install_rocm_magma.sh
|
||||
ENV ROCM_PATH /opt/rocm
|
||||
ENV PATH /opt/rocm/bin:$PATH
|
||||
ENV PATH /opt/rocm/hcc/bin:$PATH
|
||||
ENV PATH /opt/rocm/hip/bin:$PATH
|
||||
ENV PATH /opt/rocm/opencl/bin:$PATH
|
||||
ENV PATH /opt/rocm/llvm/bin:$PATH
|
||||
ENV MAGMA_HOME /opt/rocm/magma
|
||||
ENV LANG C.UTF-8
|
||||
ENV LC_ALL C.UTF-8
|
||||
|
||||
# (optional) Install non-default CMake version
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./common/install_cmake.sh install_cmake.sh
|
||||
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
|
||||
RUN rm install_cmake.sh
|
||||
|
||||
# (optional) Install non-default Ninja version
|
||||
ARG NINJA_VERSION
|
||||
COPY ./common/install_ninja.sh install_ninja.sh
|
||||
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
|
||||
RUN rm install_ninja.sh
|
||||
|
||||
ARG TRITON
|
||||
# Install triton, this needs to be done before sccache because the latter will
|
||||
# try to reach out to S3, which docker build runners don't have access to
|
||||
COPY ./common/install_triton.sh install_triton.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/triton-rocm.txt triton-rocm.txt
|
||||
COPY triton_version.txt triton_version.txt
|
||||
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
|
||||
RUN rm install_triton.sh common_utils.sh triton-rocm.txt triton_version.txt
|
||||
|
||||
# Install ccache/sccache (do this last, so we get priority in PATH)
|
||||
COPY ./common/install_cache.sh install_cache.sh
|
||||
ENV PATH /opt/cache/bin:$PATH
|
||||
RUN bash ./install_cache.sh && rm install_cache.sh
|
||||
|
||||
# Include BUILD_ENVIRONMENT environment variable in image
|
||||
ARG BUILD_ENVIRONMENT
|
||||
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
|
||||
|
||||
USER jenkins
|
||||
CMD ["bash"]
|
||||
@ -1,191 +0,0 @@
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
FROM ubuntu:${UBUNTU_VERSION}
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
ARG CLANG_VERSION
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
COPY ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Install clang
|
||||
ARG LLVMDEV
|
||||
COPY ./common/install_clang.sh install_clang.sh
|
||||
RUN bash ./install_clang.sh && rm install_clang.sh
|
||||
|
||||
# Install user
|
||||
COPY ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install katex
|
||||
ARG KATEX
|
||||
COPY ./common/install_docs_reqs.sh install_docs_reqs.sh
|
||||
RUN bash ./install_docs_reqs.sh && rm install_docs_reqs.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ARG CONDA_CMAKE
|
||||
ARG DOCS
|
||||
ENV ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION
|
||||
ENV PATH /opt/conda/envs/py_$ANACONDA_PYTHON_VERSION/bin:/opt/conda/bin:$PATH
|
||||
ENV DOCS=$DOCS
|
||||
COPY requirements-ci.txt requirements-docs.txt /opt/conda/
|
||||
COPY ./common/install_conda.sh install_conda.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh common_utils.sh /opt/conda/requirements-ci.txt /opt/conda/requirements-docs.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
COPY ./common/install_gcc.sh install_gcc.sh
|
||||
RUN bash ./install_gcc.sh && rm install_gcc.sh
|
||||
|
||||
# Install lcov for C++ code coverage
|
||||
COPY ./common/install_lcov.sh install_lcov.sh
|
||||
RUN bash ./install_lcov.sh && rm install_lcov.sh
|
||||
|
||||
# Install cuda and cudnn
|
||||
ARG CUDA_VERSION
|
||||
RUN wget -q https://raw.githubusercontent.com/pytorch/builder/main/common/install_cuda.sh -O install_cuda.sh
|
||||
RUN bash ./install_cuda.sh ${CUDA_VERSION} && rm install_cuda.sh
|
||||
ENV DESIRED_CUDA ${CUDA_VERSION}
|
||||
ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH
|
||||
|
||||
# (optional) Install UCC
|
||||
ARG UCX_COMMIT
|
||||
ARG UCC_COMMIT
|
||||
ENV UCX_COMMIT $UCX_COMMIT
|
||||
ENV UCC_COMMIT $UCC_COMMIT
|
||||
ENV UCX_HOME /usr
|
||||
ENV UCC_HOME /usr
|
||||
ADD ./common/install_ucc.sh install_ucc.sh
|
||||
RUN if [ -n "${UCX_COMMIT}" ] && [ -n "${UCC_COMMIT}" ]; then bash ./install_ucc.sh; fi
|
||||
RUN rm install_ucc.sh
|
||||
|
||||
# (optional) Install protobuf for ONNX
|
||||
ARG PROTOBUF
|
||||
COPY ./common/install_protobuf.sh install_protobuf.sh
|
||||
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
|
||||
RUN rm install_protobuf.sh
|
||||
ENV INSTALLED_PROTOBUF ${PROTOBUF}
|
||||
|
||||
# (optional) Install database packages like LMDB and LevelDB
|
||||
ARG DB
|
||||
COPY ./common/install_db.sh install_db.sh
|
||||
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
|
||||
RUN rm install_db.sh
|
||||
ENV INSTALLED_DB ${DB}
|
||||
|
||||
# (optional) Install vision packages like OpenCV and ffmpeg
|
||||
ARG VISION
|
||||
COPY ./common/install_vision.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
|
||||
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
|
||||
RUN rm install_vision.sh cache_vision_models.sh common_utils.sh
|
||||
ENV INSTALLED_VISION ${VISION}
|
||||
|
||||
# (optional) Install Android NDK
|
||||
ARG ANDROID
|
||||
ARG ANDROID_NDK
|
||||
ARG GRADLE_VERSION
|
||||
COPY ./common/install_android.sh ./common/cache_vision_models.sh ./common/common_utils.sh ./
|
||||
COPY ./android/AndroidManifest.xml AndroidManifest.xml
|
||||
COPY ./android/build.gradle build.gradle
|
||||
RUN if [ -n "${ANDROID}" ]; then bash ./install_android.sh; fi
|
||||
RUN rm install_android.sh cache_vision_models.sh common_utils.sh
|
||||
RUN rm AndroidManifest.xml
|
||||
RUN rm build.gradle
|
||||
ENV INSTALLED_ANDROID ${ANDROID}
|
||||
|
||||
# (optional) Install Vulkan SDK
|
||||
ARG VULKAN_SDK_VERSION
|
||||
COPY ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh
|
||||
RUN if [ -n "${VULKAN_SDK_VERSION}" ]; then bash ./install_vulkan_sdk.sh; fi
|
||||
RUN rm install_vulkan_sdk.sh
|
||||
|
||||
# (optional) Install swiftshader
|
||||
ARG SWIFTSHADER
|
||||
COPY ./common/install_swiftshader.sh install_swiftshader.sh
|
||||
RUN if [ -n "${SWIFTSHADER}" ]; then bash ./install_swiftshader.sh; fi
|
||||
RUN rm install_swiftshader.sh
|
||||
|
||||
# (optional) Install non-default CMake version
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./common/install_cmake.sh install_cmake.sh
|
||||
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
|
||||
RUN rm install_cmake.sh
|
||||
|
||||
# (optional) Install non-default Ninja version
|
||||
ARG NINJA_VERSION
|
||||
COPY ./common/install_ninja.sh install_ninja.sh
|
||||
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
|
||||
RUN rm install_ninja.sh
|
||||
|
||||
COPY ./common/install_openssl.sh install_openssl.sh
|
||||
RUN bash ./install_openssl.sh
|
||||
ENV OPENSSL_ROOT_DIR /opt/openssl
|
||||
ENV OPENSSL_DIR /opt/openssl
|
||||
RUN rm install_openssl.sh
|
||||
|
||||
ARG INDUCTOR_BENCHMARKS
|
||||
COPY ./common/install_inductor_benchmark_deps.sh install_inductor_benchmark_deps.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/huggingface.txt huggingface.txt
|
||||
COPY ci_commit_pins/timm.txt timm.txt
|
||||
RUN if [ -n "${INDUCTOR_BENCHMARKS}" ]; then bash ./install_inductor_benchmark_deps.sh; fi
|
||||
RUN rm install_inductor_benchmark_deps.sh common_utils.sh timm.txt huggingface.txt
|
||||
|
||||
ARG TRITON
|
||||
# Install triton, this needs to be done before sccache because the latter will
|
||||
# try to reach out to S3, which docker build runners don't have access to
|
||||
COPY ./common/install_triton.sh install_triton.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/triton.txt triton.txt
|
||||
RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
|
||||
RUN rm install_triton.sh common_utils.sh triton.txt
|
||||
|
||||
ARG EXECUTORCH
|
||||
# Build and install executorch
|
||||
COPY ./common/install_executorch.sh install_executorch.sh
|
||||
COPY ./common/common_utils.sh common_utils.sh
|
||||
COPY ci_commit_pins/executorch.txt executorch.txt
|
||||
RUN if [ -n "${EXECUTORCH}" ]; then bash ./install_executorch.sh; fi
|
||||
RUN rm install_executorch.sh common_utils.sh executorch.txt
|
||||
|
||||
ARG ONNX
|
||||
# Install ONNX dependencies
|
||||
COPY ./common/install_onnx.sh ./common/common_utils.sh ./
|
||||
RUN if [ -n "${ONNX}" ]; then bash ./install_onnx.sh; fi
|
||||
RUN rm install_onnx.sh common_utils.sh
|
||||
|
||||
# Install ccache/sccache (do this last, so we get priority in PATH)
|
||||
COPY ./common/install_cache.sh install_cache.sh
|
||||
ENV PATH /opt/cache/bin:$PATH
|
||||
RUN bash ./install_cache.sh && rm install_cache.sh
|
||||
|
||||
# Add jni.h for java host build
|
||||
COPY ./common/install_jni.sh install_jni.sh
|
||||
COPY ./java/jni.h jni.h
|
||||
RUN bash ./install_jni.sh && rm install_jni.sh
|
||||
|
||||
# Install Open MPI for CUDA
|
||||
COPY ./common/install_openmpi.sh install_openmpi.sh
|
||||
RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi
|
||||
RUN rm install_openmpi.sh
|
||||
|
||||
# Include BUILD_ENVIRONMENT environment variable in image
|
||||
ARG BUILD_ENVIRONMENT
|
||||
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
|
||||
|
||||
# Install LLVM dev version (Defined in the pytorch/builder github repository)
|
||||
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
|
||||
|
||||
# AWS specific CUDA build guidance
|
||||
ENV TORCH_CUDA_ARCH_LIST Maxwell
|
||||
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
|
||||
ENV CUDA_PATH /usr/local/cuda
|
||||
|
||||
USER jenkins
|
||||
CMD ["bash"]
|
||||
@ -1,14 +0,0 @@
|
||||
# Jenkins
|
||||
|
||||
The scripts in this directory are the entry point for testing the ONNX exporter.
|
||||
|
||||
The environment variable `BUILD_ENVIRONMENT` is expected to be set to
|
||||
the build environment you intend to test. It is a hint for the build
|
||||
and test scripts to configure Caffe2 a certain way and include/exclude
|
||||
tests. For Docker images, it equals the name of the image itself. For
|
||||
example: `py2-cuda9.0-cudnn7-ubuntu16.04`. The Docker images that are
|
||||
built on Jenkins and are used in triggered builds already have this
|
||||
environment variable set in their manifest. Also see
|
||||
`./docker/jenkins/*/Dockerfile` and search for `BUILD_ENVIRONMENT`.
|
||||
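As a rough sketch (the exact substrings checked vary per script), the scripts typically branch on parts of this variable, for example:

```bash
# hypothetical excerpt; real scripts check many more patterns
if [[ "${BUILD_ENVIRONMENT}" == *onnx* ]]; then
  # run the ONNX exporter tests
  "$ROOT_DIR/scripts/onnx/test.sh"
fi
```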
|
||||
Our Jenkins installation is located at https://ci.pytorch.org/jenkins/.
|
||||
@ -1,19 +0,0 @@
|
||||
set -ex
|
||||
|
||||
LOCAL_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
||||
ROOT_DIR=$(cd "$LOCAL_DIR"/../.. && pwd)
|
||||
TEST_DIR="$ROOT_DIR/test"
|
||||
pytest_reports_dir="${TEST_DIR}/test-reports/python"
|
||||
|
||||
# Figure out which Python to use
|
||||
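# e.g. BUILD_ENVIRONMENT=linux-focal-py3.7-gcc7-build matches "py3.7" below and selects python3.7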
PYTHON="$(which python)"
|
||||
if [[ "${BUILD_ENVIRONMENT}" =~ py((2|3)\.?[0-9]?\.?[0-9]?) ]]; then
|
||||
PYTHON=$(which "python${BASH_REMATCH[1]}")
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *rocm* ]]; then
|
||||
# HIP_PLATFORM is auto-detected by hipcc; unset to avoid build errors
|
||||
unset HIP_PLATFORM
|
||||
fi
|
||||
|
||||
mkdir -p "$pytest_reports_dir" || true
|
||||
@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
|
||||
# TODO: This can be removed later once vision is also part of the Docker image
|
||||
pip install -q --user --no-use-pep517 "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)"
|
||||
# JIT C++ extensions require ninja, so put it into PATH.
|
||||
export PATH="/var/lib/jenkins/.local/bin:$PATH"
|
||||
# NB: ONNX test is fast (~15m) so it's ok to retry it a few more times to avoid any flaky issues; we
|
||||
# need to bring this to the standard PyTorch run_test eventually. The issue will be tracked in
|
||||
# https://github.com/pytorch/pytorch/issues/98626
|
||||
"$ROOT_DIR/scripts/onnx/test.sh"
|
||||
fi
|
||||
@ -1,42 +0,0 @@
|
||||
This directory contains scripts for our continuous integration.
|
||||
|
||||
One important thing to keep in mind when reading the scripts here is
|
||||
that they are all based off of Docker images, which we build for each of
|
||||
the various system configurations we want to run on Jenkins. This means
|
||||
it is very easy to run these tests yourself:
|
||||
|
||||
1. Figure out what Docker image you want. The general template for our
|
||||
images looks like:
|
||||
``registry.pytorch.org/pytorch/pytorch-$BUILD_ENVIRONMENT:$DOCKER_VERSION``,
|
||||
where ``$BUILD_ENVIRONMENT`` is one of the build environments
|
||||
enumerated in
|
||||
[pytorch-dockerfiles](https://github.com/pytorch/pytorch/blob/master/.ci/docker/build.sh). The dockerfile used by jenkins can be found under the `.ci` [directory](https://github.com/pytorch/pytorch/blob/master/.ci/docker)
|
||||
|
||||
2. Run ``docker run -it -u jenkins $DOCKER_IMAGE``, clone PyTorch and
|
||||
run one of the scripts in this directory (see the example sketched below).
|
||||
|
||||
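For example, a local run might look like the sketch below (the image tag and script path are illustrative; substitute an image you have actually built or pulled):

```bash
# illustrative only: pick a real BUILD_ENVIRONMENT/DOCKER_VERSION pair
DOCKER_IMAGE=registry.pytorch.org/pytorch/pytorch-linux-focal-py3.7-gcc7-build:latest
docker run -it -u jenkins "$DOCKER_IMAGE" bash

# then, inside the container:
git clone https://github.com/pytorch/pytorch.git && cd pytorch
export BUILD_ENVIRONMENT=linux-focal-py3.7-gcc7-build  # usually already set in the image
.ci/pytorch/build.sh   # or another script from this directory (path is illustrative)
```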
The Docker images are designed so that any "reasonable" build commands
|
||||
will work; if you look in [build.sh](build.sh) you will see that it is a
|
||||
very simple script. This is intentional. Idiomatic build instructions
|
||||
should work inside all of our Docker images. You can tweak the commands
|
||||
however you need (e.g., in case you want to rebuild with DEBUG, or rerun
|
||||
the build with higher verbosity, etc.).
|
||||
|
||||
We have to do some work to make this so. Here is a summary of the
|
||||
mechanisms we use:
|
||||
|
||||
- We install binaries to directories like `/usr/local/bin` which
|
||||
are automatically part of your PATH.
|
||||
|
||||
- We add entries to the PATH using Docker ENV variables (so
|
||||
they apply when you enter Docker) and `/etc/environment` (so they
|
||||
continue to apply even if you sudo), instead of modifying
|
||||
`PATH` in our build scripts.
|
||||
|
||||
- We use `/etc/ld.so.conf.d` to register directories containing
|
||||
shared libraries, instead of modifying `LD_LIBRARY_PATH` in our
|
||||
build scripts.
|
||||
|
||||
- We reroute well known paths like `/usr/bin/gcc` to alternate
|
||||
implementations with `update-alternatives`, instead of setting
|
||||
`CC` and `CXX` in our implementations.
|
||||
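As an illustration of that last point, the images register compilers roughly like this (a hypothetical sketch; the actual compiler versions and priorities vary by image):

```bash
# make /usr/bin/gcc and /usr/bin/g++ resolve to a specific toolchain
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 50
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-9 50
# build scripts can then call plain gcc/g++ without exporting CC or CXX
```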
@ -1,336 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -ex
|
||||
|
||||
# Required environment variable: $BUILD_ENVIRONMENT
|
||||
# (This is set by default in the Docker images we build, so you don't
|
||||
# need to set it yourself.
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
# shellcheck source=./common-build.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *-mobile-*build* ]]; then
|
||||
exec "$(dirname "${BASH_SOURCE[0]}")/build-mobile.sh" "$@"
|
||||
fi
|
||||
|
||||
echo "Python version:"
|
||||
python --version
|
||||
|
||||
echo "GCC version:"
|
||||
gcc --version
|
||||
|
||||
echo "CMake version:"
|
||||
cmake --version
|
||||
|
||||
echo "Environment variables:"
|
||||
env
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
|
||||
echo "NVCC version:"
|
||||
nvcc --version
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda11* ]]; then
|
||||
if [[ "$BUILD_ENVIRONMENT" != *cuda11.3* && "$BUILD_ENVIRONMENT" != *clang* ]]; then
|
||||
# TODO: there is a linking issue when building with UCC using clang,
|
||||
# disable it for now; to be fixed later.
|
||||
# TODO: disable UCC temporarily to enable CUDA 12.1 in CI
|
||||
export USE_UCC=1
|
||||
export USE_SYSTEM_UCC=1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"caffe2"* ]]; then
|
||||
echo "Caffe2 build is ON"
|
||||
export BUILD_CAFFE2=ON
|
||||
fi
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
export ATEN_THREADING=TBB
|
||||
export USE_TBB=1
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
export ATEN_THREADING=NATIVE
|
||||
fi
|
||||
|
||||
# Enable LLVM dependency for TensorExpr testing
|
||||
if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
|
||||
export USE_LLVM=/opt/rocm/llvm
|
||||
export LLVM_DIR=/opt/rocm/llvm/lib/cmake/llvm
|
||||
else
|
||||
export USE_LLVM=/opt/llvm
|
||||
export LLVM_DIR=/opt/llvm/lib/cmake/llvm
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *executorch* ]]; then
|
||||
# To build test_edge_op_registration
|
||||
export BUILD_EXECUTORCH=ON
|
||||
export USE_CUDA=0
|
||||
fi
|
||||
|
||||
if ! which conda; then
|
||||
# In ROCm CIs, we are doing cross compilation on build machines with
|
||||
# intel cpu and later run tests on machines with amd cpu.
|
||||
# Also leave out two builds to make sure non-mkldnn builds still work.
|
||||
if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then
|
||||
export USE_MKLDNN=1
|
||||
else
|
||||
export USE_MKLDNN=0
|
||||
fi
|
||||
else
|
||||
export CMAKE_PREFIX_PATH=/opt/conda
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *libtorch* ]]; then
|
||||
POSSIBLE_JAVA_HOMES=()
|
||||
POSSIBLE_JAVA_HOMES+=(/usr/local)
|
||||
POSSIBLE_JAVA_HOMES+=(/usr/lib/jvm/java-8-openjdk-amd64)
|
||||
POSSIBLE_JAVA_HOMES+=(/Library/Java/JavaVirtualMachines/*.jdk/Contents/Home)
|
||||
# Add the Windows-specific JNI
|
||||
POSSIBLE_JAVA_HOMES+=("$PWD/.circleci/windows-jni/")
|
||||
for JH in "${POSSIBLE_JAVA_HOMES[@]}" ; do
|
||||
if [[ -e "$JH/include/jni.h" ]] ; then
|
||||
# Skip if we're not on Windows but haven't found a JAVA_HOME
|
||||
if [[ "$JH" == "$PWD/.circleci/windows-jni/" && "$OSTYPE" != "msys" ]] ; then
|
||||
break
|
||||
fi
|
||||
echo "Found jni.h under $JH"
|
||||
export JAVA_HOME="$JH"
|
||||
export BUILD_JNI=ON
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
echo "Did not find jni.h"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Use special scripts for Android builds
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *-android* ]]; then
|
||||
export ANDROID_NDK=/opt/ndk
|
||||
build_args=()
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *-arm-v7a* ]]; then
|
||||
build_args+=("-DANDROID_ABI=armeabi-v7a")
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *-arm-v8a* ]]; then
|
||||
build_args+=("-DANDROID_ABI=arm64-v8a")
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *-x86_32* ]]; then
|
||||
build_args+=("-DANDROID_ABI=x86")
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *-x86_64* ]]; then
|
||||
build_args+=("-DANDROID_ABI=x86_64")
|
||||
fi
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *vulkan* ]]; then
|
||||
build_args+=("-DUSE_VULKAN=ON")
|
||||
fi
|
||||
build_args+=("-DUSE_LITE_INTERPRETER_PROFILER=OFF")
|
||||
exec ./scripts/build_android.sh "${build_args[@]}" "$@"
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" != *android* && "$BUILD_ENVIRONMENT" == *vulkan* ]]; then
|
||||
export USE_VULKAN=1
|
||||
# shellcheck disable=SC1091
|
||||
source /var/lib/jenkins/vulkansdk/setup-env.sh
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
|
||||
# hcc used to run out of memory, silently exiting without stopping
|
||||
# the build process, leaving undefined symbols in the shared lib,
|
||||
# causing undefined symbol errors when later running tests.
|
||||
# We used to set MAX_JOBS to 4 to avoid this, but it is no longer an issue.
|
||||
if [ -z "$MAX_JOBS" ]; then
|
||||
export MAX_JOBS=$(($(nproc) - 1))
|
||||
fi
|
||||
|
||||
if [[ -n "$CI" && -z "$PYTORCH_ROCM_ARCH" ]]; then
|
||||
# Set ROCM_ARCH to gfx906 for CI builds, if user doesn't override.
|
||||
echo "Limiting PYTORCH_ROCM_ARCH to gfx906 for CI builds"
|
||||
export PYTORCH_ROCM_ARCH="gfx906"
|
||||
fi
|
||||
|
||||
# hipify sources
|
||||
python tools/amd_build/build_amd.py
|
||||
fi
|
||||
|
||||
# sccache will fail for CUDA builds if all cores are used for compiling
|
||||
# gcc 7 with sccache seems to have intermittent OOM issue if all cores are used
|
||||
if [ -z "$MAX_JOBS" ]; then
|
||||
if { [[ "$BUILD_ENVIRONMENT" == *cuda* ]] || [[ "$BUILD_ENVIRONMENT" == *gcc7* ]]; } && which sccache > /dev/null; then
|
||||
export MAX_JOBS=$(($(nproc) - 1))
|
||||
fi
|
||||
fi
|
||||
|
||||
# TORCH_CUDA_ARCH_LIST must be passed from an environment variable
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda* && -z "$TORCH_CUDA_ARCH_LIST" ]]; then
|
||||
echo "TORCH_CUDA_ARCH_LIST must be defined"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# We only build FlashAttention files for compute capability 8.0+, and they require large amounts of
|
||||
# memory to build and will OOM
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]] && [[ "$TORCH_CUDA_ARCH_LIST" == *"8.6"* || "$TORCH_CUDA_ARCH_LIST" == *"8.0"* ]]; then
|
||||
echo "WARNING: FlashAttention files require large amounts of memory to build and will OOM"
|
||||
echo "Setting MAX_JOBS=(nproc-2)/3 to reduce memory usage"
|
||||
export MAX_JOBS="$(( $(nproc --ignore=2) / 3 ))"
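# As a rough illustration (assuming a hypothetical 32-core runner):
# $(nproc --ignore=2) yields 30, so MAX_JOBS becomes 30 / 3 = 10.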
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *clang* ]]; then
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *-clang*-asan* ]]; then
|
||||
export LDSHARED="clang --shared"
|
||||
export USE_CUDA=0
|
||||
export USE_ASAN=1
|
||||
export UBSAN_FLAGS="-fno-sanitize-recover=all;-fno-sanitize=float-divide-by-zero;-fno-sanitize=float-cast-overflow"
|
||||
unset USE_LLVM
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *no-ops* ]]; then
|
||||
export USE_PER_OPERATOR_HEADERS=0
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *-pch* ]]; then
|
||||
export USE_PRECOMPILED_HEADERS=1
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *linux-focal-py3.7-gcc7-build* ]]; then
|
||||
export USE_GLOO_WITH_OPENSSL=ON
|
||||
fi
|
||||
|
||||
if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
|
||||
export BUILD_STATIC_RUNTIME_BENCHMARK=ON
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then
|
||||
set -e
|
||||
|
||||
get_bazel
|
||||
install_sccache_nvcc_for_bazel
|
||||
|
||||
# Leave 1 CPU free and use only up to 80% of memory to reduce the chance of crashing
|
||||
# the runner
|
||||
BAZEL_MEM_LIMIT="--local_ram_resources=HOST_RAM*.8"
|
||||
BAZEL_CPU_LIMIT="--local_cpu_resources=HOST_CPUS-1"
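# As a sketch (assuming a hypothetical 16-CPU, 64 GB runner): these flags let
# Bazel use 15 CPUs and about 0.8 * 64 GB, i.e. roughly 51 GB of RAM.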
|
||||
|
||||
if [[ "$CUDA_VERSION" == "cpu" ]]; then
|
||||
# Build torch, the Python module, and tests for CPU-only
|
||||
tools/bazel build --config=no-tty "${BAZEL_MEM_LIMIT}" "${BAZEL_CPU_LIMIT}" --config=cpu-only :torch :torch/_C.so :all_tests
|
||||
else
|
||||
tools/bazel build --config=no-tty "${BAZEL_MEM_LIMIT}" "${BAZEL_CPU_LIMIT}" //...
|
||||
fi
|
||||
else
|
||||
# check that setup.py would fail with bad arguments
|
||||
echo "The next three invocations are expected to fail with invalid command error messages."
|
||||
( ! get_exit_code python setup.py bad_argument )
|
||||
( ! get_exit_code python setup.py clean] )
|
||||
( ! get_exit_code python setup.py clean bad_argument )
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" != *libtorch* ]]; then
|
||||
|
||||
# rocm builds fail when WERROR=1
|
||||
# XLA test build fails when WERROR=1
|
||||
# set only when building other architectures
|
||||
# or building non-XLA tests.
|
||||
if [[ "$BUILD_ENVIRONMENT" != *rocm* &&
|
||||
"$BUILD_ENVIRONMENT" != *xla* ]]; then
|
||||
WERROR=1 python setup.py bdist_wheel
|
||||
else
|
||||
python setup.py bdist_wheel
|
||||
fi
|
||||
pip_install_whl "$(echo dist/*.whl)"
|
||||
|
||||
# TODO: I'm not sure why, but somehow we lose verbose commands
|
||||
set -x
|
||||
|
||||
assert_git_not_dirty
|
||||
# Copy ninja build logs to dist folder
|
||||
mkdir -p dist
|
||||
if [ -f build/.ninja_log ]; then
|
||||
cp build/.ninja_log dist
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
|
||||
# remove sccache wrappers post-build; runtime compilation of MIOpen kernels does not yet fully support them
|
||||
sudo rm -f /opt/cache/bin/cc
|
||||
sudo rm -f /opt/cache/bin/c++
|
||||
sudo rm -f /opt/cache/bin/gcc
|
||||
sudo rm -f /opt/cache/bin/g++
|
||||
pushd /opt/rocm/llvm/bin
|
||||
if [[ -d original ]]; then
|
||||
sudo mv original/clang .
|
||||
sudo mv original/clang++ .
|
||||
fi
|
||||
sudo rm -rf original
|
||||
popd
|
||||
fi
|
||||
|
||||
CUSTOM_TEST_ARTIFACT_BUILD_DIR=${CUSTOM_TEST_ARTIFACT_BUILD_DIR:-"build/custom_test_artifacts"}
|
||||
CUSTOM_TEST_USE_ROCM=$([[ "$BUILD_ENVIRONMENT" == *rocm* ]] && echo "ON" || echo "OFF")
|
||||
CUSTOM_TEST_MODULE_PATH="${PWD}/cmake/public"
|
||||
mkdir -pv "${CUSTOM_TEST_ARTIFACT_BUILD_DIR}"
|
||||
|
||||
# Build custom operator tests.
|
||||
CUSTOM_OP_BUILD="${CUSTOM_TEST_ARTIFACT_BUILD_DIR}/custom-op-build"
|
||||
CUSTOM_OP_TEST="$PWD/test/custom_operator"
|
||||
python --version
|
||||
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
|
||||
mkdir -p "$CUSTOM_OP_BUILD"
|
||||
pushd "$CUSTOM_OP_BUILD"
|
||||
cmake "$CUSTOM_OP_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)" \
|
||||
-DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
|
||||
make VERBOSE=1
|
||||
popd
|
||||
assert_git_not_dirty
|
||||
|
||||
# Build jit hook tests
|
||||
JIT_HOOK_BUILD="${CUSTOM_TEST_ARTIFACT_BUILD_DIR}/jit-hook-build"
|
||||
JIT_HOOK_TEST="$PWD/test/jit_hooks"
|
||||
python --version
|
||||
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
|
||||
mkdir -p "$JIT_HOOK_BUILD"
|
||||
pushd "$JIT_HOOK_BUILD"
|
||||
cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)" \
|
||||
-DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
|
||||
make VERBOSE=1
|
||||
popd
|
||||
assert_git_not_dirty
|
||||
|
||||
# Build custom backend tests.
|
||||
CUSTOM_BACKEND_BUILD="${CUSTOM_TEST_ARTIFACT_BUILD_DIR}/custom-backend-build"
|
||||
CUSTOM_BACKEND_TEST="$PWD/test/custom_backend"
|
||||
python --version
|
||||
mkdir -p "$CUSTOM_BACKEND_BUILD"
|
||||
pushd "$CUSTOM_BACKEND_BUILD"
|
||||
cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)" \
|
||||
-DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
|
||||
make VERBOSE=1
|
||||
popd
|
||||
assert_git_not_dirty
|
||||
else
|
||||
# Test no-Python build
|
||||
echo "Building libtorch"
|
||||
|
||||
# This is an attempt to mitigate flaky libtorch build OOM error. By default, the build parallelization
|
||||
# is set to the number of CPUs minus 2. So, let's try a more conservative value here. A 4xlarge has
|
||||
# 16 CPUs
|
||||
MAX_JOBS=$(nproc --ignore=4)
|
||||
export MAX_JOBS
|
||||
|
||||
# NB: Install outside of source directory (at the same level as the root
|
||||
# pytorch folder) so that it doesn't get cleaned away prior to docker push.
|
||||
BUILD_LIBTORCH_PY=$PWD/tools/build_libtorch.py
|
||||
mkdir -p ../cpp-build/caffe2
|
||||
pushd ../cpp-build/caffe2
|
||||
WERROR=1 VERBOSE=1 DEBUG=1 python "$BUILD_LIBTORCH_PY"
|
||||
popd
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" != *libtorch* && "$BUILD_ENVIRONMENT" != *bazel* ]]; then
|
||||
# export test times so that potential sharded tests that'll branch off this build will use consistent data
|
||||
# don't do this for libtorch as libtorch is C++ only and thus won't have python tests run on its build
|
||||
python tools/stats/export_test_times.py
|
||||
fi
|
||||
|
||||
print_sccache_stats
|
||||
@ -1,59 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Required environment variables:
|
||||
# $BUILD_ENVIRONMENT (should be set by your Docker image)
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" != *win-* ]]; then
|
||||
# Save the absolute path in case later we chdir (as occurs in the gpu perf test)
|
||||
script_dir="$( cd "$(dirname "${BASH_SOURCE[0]}")" || exit ; pwd -P )"
|
||||
|
||||
if which sccache > /dev/null; then
|
||||
# Save sccache logs to file
|
||||
sccache --stop-server > /dev/null 2>&1 || true
|
||||
rm -f ~/sccache_error.log || true
|
||||
|
||||
function sccache_epilogue() {
|
||||
echo "::group::Sccache Compilation Log"
|
||||
echo '=================== sccache compilation log ==================='
|
||||
python "$script_dir/print_sccache_log.py" ~/sccache_error.log 2>/dev/null || true
|
||||
echo '=========== If your build fails, please take a look at the log above for possible reasons ==========='
|
||||
sccache --show-stats
|
||||
sccache --stop-server || true
|
||||
echo "::endgroup::"
|
||||
}
|
||||
|
||||
# Register the function here so that the error log can be printed even when
|
||||
# sccache fails to start, i.e. timeout error
|
||||
trap_add sccache_epilogue EXIT
|
||||
|
||||
if [[ -n "${SKIP_SCCACHE_INITIALIZATION:-}" ]]; then
|
||||
# sccache --start-server seems to hang forever on self hosted runners for GHA
|
||||
# so let's just go ahead and skip the --start-server altogether since it seems
|
||||
# as though sccache still gets used even when the sccache server isn't started
|
||||
# explicitly
|
||||
echo "Skipping sccache server initialization, setting environment variables"
|
||||
export SCCACHE_IDLE_TIMEOUT=0
|
||||
export SCCACHE_ERROR_LOG=~/sccache_error.log
|
||||
export RUST_LOG=sccache::server=error
|
||||
elif [[ "${BUILD_ENVIRONMENT}" == *rocm* ]]; then
|
||||
SCCACHE_ERROR_LOG=~/sccache_error.log SCCACHE_IDLE_TIMEOUT=0 sccache --start-server
|
||||
else
|
||||
# increasing SCCACHE_IDLE_TIMEOUT so that extension_backend_test.cpp can build after this PR:
|
||||
# https://github.com/pytorch/pytorch/pull/16645
|
||||
SCCACHE_ERROR_LOG=~/sccache_error.log SCCACHE_IDLE_TIMEOUT=0 RUST_LOG=sccache::server=error sccache --start-server
|
||||
fi
|
||||
|
||||
# Report sccache stats for easier debugging. It's OK if this command
|
||||
# times out and fails on macOS
|
||||
sccache --zero-stats || true
|
||||
fi
|
||||
|
||||
if which ccache > /dev/null; then
|
||||
# Report ccache stats for easier debugging
|
||||
ccache --zero-stats
|
||||
ccache --show-stats
|
||||
function ccache_epilogue() {
|
||||
ccache --show-stats
|
||||
}
|
||||
trap_add ccache_epilogue EXIT
|
||||
fi
|
||||
fi
|
||||
@ -1,24 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Common setup for all Jenkins scripts
|
||||
# shellcheck source=./common_utils.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
|
||||
set -ex
|
||||
|
||||
# Required environment variables:
|
||||
# $BUILD_ENVIRONMENT (should be set by your Docker image)
|
||||
|
||||
# Figure out which Python to use for ROCm
|
||||
if [[ "${BUILD_ENVIRONMENT}" == *rocm* ]]; then
|
||||
# HIP_PLATFORM is auto-detected by hipcc; unset to avoid build errors
|
||||
unset HIP_PLATFORM
|
||||
export PYTORCH_TEST_WITH_ROCM=1
|
||||
# temporary to locate some kernel issues on the CI nodes
|
||||
export HSAKMT_DEBUG_LEVEL=4
|
||||
# improve rccl performance for distributed tests
|
||||
export HSA_FORCE_FINE_GRAIN_PCIE=1
|
||||
fi
|
||||
|
||||
# TODO: Re-enable libtorch testing for macOS, see https://github.com/pytorch/pytorch/issues/62598
|
||||
# shellcheck disable=SC2034
|
||||
BUILD_TEST_LIBTORCH=0
|
||||
@ -1,235 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Common util **functions** that can be sourced in other scripts.
|
||||
|
||||
# note: printf is used instead of echo to avoid backslash
|
||||
# processing and to properly handle values that begin with a '-'.
|
||||
|
||||
log() { printf '%s\n' "$*"; }
|
||||
error() { log "ERROR: $*" >&2; }
|
||||
fatal() { error "$@"; exit 1; }
|
||||
|
||||
retry () {
|
||||
"$@" || (sleep 10 && "$@") || (sleep 20 && "$@") || (sleep 40 && "$@")
|
||||
}
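# Illustrative use (mirroring how get_bazel uses it below): retry a flaky
# download, waiting 10s, 20s, and 40s between up to four attempts:
#   retry curl --location --output tools/bazel <url>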
|
||||
|
||||
# compositional trap taken from https://stackoverflow.com/a/7287873/23845
|
||||
# appends a command to a trap
|
||||
#
|
||||
# - 1st arg: code to add
|
||||
# - remaining args: names of traps to modify
|
||||
#
|
||||
trap_add() {
|
||||
trap_add_cmd=$1; shift || fatal "${FUNCNAME[0]} usage error"
|
||||
for trap_add_name in "$@"; do
|
||||
trap -- "$(
|
||||
# helper fn to get existing trap command from output
|
||||
# of trap -p
|
||||
extract_trap_cmd() { printf '%s\n' "$3"; }
|
||||
# print existing trap command with newline
|
||||
eval "extract_trap_cmd $(trap -p "${trap_add_name}")"
|
||||
# print the new trap command
|
||||
printf '%s\n' "${trap_add_cmd}"
|
||||
)" "${trap_add_name}" \
|
||||
|| fatal "unable to add to trap ${trap_add_name}"
|
||||
done
|
||||
}
|
||||
# set the trace attribute for the above function. this is
|
||||
# required to modify DEBUG or RETURN traps because functions don't
|
||||
# inherit them unless the trace attribute is set
|
||||
declare -f -t trap_add
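# Illustrative use (both registrations coexist on the same EXIT trap instead
# of overwriting each other, which a plain `trap` call would do):
#   trap_add sccache_epilogue EXIT
#   trap_add ccache_epilogue EXIT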
|
||||
|
||||
function assert_git_not_dirty() {
|
||||
# TODO: we should add an option to `build_amd.py` that reverts the repo to
|
||||
# an unmodified state.
|
||||
if [[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *xla* ]] ; then
|
||||
git_status=$(git status --porcelain | grep -v '?? third_party' || true)
|
||||
if [[ $git_status ]]; then
|
||||
echo "Build left local git repository checkout dirty"
|
||||
echo "git status --porcelain:"
|
||||
echo "${git_status}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function pip_install_whl() {
|
||||
# This is used to install the PyTorch wheel and other build-artifact wheels locally
|
||||
# without using any network connection
|
||||
python3 -mpip install --no-index --no-deps "$@"
|
||||
}
|
||||
|
||||
function pip_install() {
|
||||
# retry 3 times
|
||||
# old versions of pip don't have the "--progress-bar" flag
|
||||
pip install --progress-bar off "$@" || pip install --progress-bar off "$@" || pip install --progress-bar off "$@" ||\
|
||||
pip install "$@" || pip install "$@" || pip install "$@"
|
||||
}
|
||||
|
||||
function pip_uninstall() {
|
||||
# uninstall 2 times
|
||||
pip uninstall -y "$@" || pip uninstall -y "$@"
|
||||
}
|
||||
|
||||
function get_exit_code() {
|
||||
set +e
|
||||
"$@"
|
||||
retcode=$?
|
||||
set -e
|
||||
return $retcode
|
||||
}
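# Illustrative use (as in the setup.py argument checks in build.sh): run a
# command that is expected to fail without tripping `set -e`:
#   ( ! get_exit_code python setup.py bad_argument )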
|
||||
|
||||
function get_bazel() {
|
||||
# Download and use the cross-platform, dependency-free Python
|
||||
# version of Bazelisk to fetch the platform specific version of
|
||||
# Bazel to use from .bazelversion.
|
||||
retry curl --location --output tools/bazel \
|
||||
https://raw.githubusercontent.com/bazelbuild/bazelisk/v1.16.0/bazelisk.py
|
||||
shasum --algorithm=1 --check \
|
||||
<(echo 'd4369c3d293814d3188019c9f7527a948972d9f8 tools/bazel')
|
||||
chmod u+x tools/bazel
|
||||
}
|
||||
|
||||
# This function is Bazel-specific because of a bug
|
||||
# in Bazel that requires some special path massaging
|
||||
# as a workaround. See
|
||||
# https://github.com/bazelbuild/bazel/issues/10167
|
||||
function install_sccache_nvcc_for_bazel() {
|
||||
sudo mv /usr/local/cuda/bin/nvcc /usr/local/cuda/bin/nvcc-real
|
||||
|
||||
# Write the `/usr/local/cuda/bin/nvcc`
|
||||
cat << EOF | sudo tee /usr/local/cuda/bin/nvcc
|
||||
#!/bin/sh
|
||||
if [ \$(env -u LD_PRELOAD ps -p \$PPID -o comm=) != sccache ]; then
|
||||
exec sccache /usr/local/cuda/bin/nvcc "\$@"
|
||||
else
|
||||
exec external/local_cuda/cuda/bin/nvcc-real "\$@"
|
||||
fi
|
||||
EOF
|
||||
|
||||
sudo chmod +x /usr/local/cuda/bin/nvcc
|
||||
}
|
||||
|
||||
function install_monkeytype {
|
||||
# Install MonkeyType
|
||||
pip_install MonkeyType
|
||||
}
|
||||
|
||||
|
||||
function get_pinned_commit() {
|
||||
cat .github/ci_commit_pins/"${1}".txt
|
||||
}
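# Illustrative use: `get_pinned_commit vision` prints the commit hash stored in
# .github/ci_commit_pins/vision.txt, which install_torchvision then pip-installs
# torchvision from GitHub at.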
|
||||
|
||||
function install_torchaudio() {
|
||||
local commit
|
||||
commit=$(get_pinned_commit audio)
|
||||
if [[ "$1" == "cuda" ]]; then
|
||||
# TODO: This would be better passed as a parameter from the _linux-test workflow
|
||||
# so that it can be consistent with what is set in build
|
||||
TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}"
|
||||
else
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
function install_torchtext() {
|
||||
local data_commit
|
||||
local text_commit
|
||||
data_commit=$(get_pinned_commit data)
|
||||
text_commit=$(get_pinned_commit text)
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/data.git@${data_commit}"
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/text.git@${text_commit}"
|
||||
}
|
||||
|
||||
function install_torchvision() {
|
||||
local orig_preload
|
||||
local commit
|
||||
commit=$(get_pinned_commit vision)
|
||||
orig_preload=${LD_PRELOAD}
|
||||
if [ -n "${LD_PRELOAD}" ]; then
|
||||
# Silence dlerror to work-around glibc ASAN bug, see https://sourceware.org/bugzilla/show_bug.cgi?id=27653#c9
|
||||
echo 'char* dlerror(void) { return "";}'|gcc -fpic -shared -o "${HOME}/dlerror.so" -x c -
|
||||
LD_PRELOAD=${orig_preload}:${HOME}/dlerror.so
|
||||
fi
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/vision.git@${commit}"
|
||||
if [ -n "${LD_PRELOAD}" ]; then
|
||||
LD_PRELOAD=${orig_preload}
|
||||
fi
|
||||
}
|
||||
|
||||
function install_torchrec_and_fbgemm() {
|
||||
local torchrec_commit
|
||||
torchrec_commit=$(get_pinned_commit torchrec)
|
||||
local fbgemm_commit
|
||||
fbgemm_commit=$(get_pinned_commit fbgemm)
|
||||
pip_uninstall torchrec-nightly
|
||||
pip_uninstall fbgemm-gpu-nightly
|
||||
pip_install setuptools-git-versioning scikit-build pyre-extensions
|
||||
# See https://github.com/pytorch/pytorch/issues/106971
|
||||
CUDA_PATH=/usr/local/cuda-12.1 pip_install --no-use-pep517 --user "git+https://github.com/pytorch/FBGEMM.git@${fbgemm_commit}#egg=fbgemm-gpu&subdirectory=fbgemm_gpu"
|
||||
pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
|
||||
}
|
||||
|
||||
function clone_pytorch_xla() {
|
||||
if [[ ! -d ./xla ]]; then
|
||||
git clone --recursive --quiet https://github.com/pytorch/xla.git
|
||||
pushd xla
|
||||
# pin the xla hash so that we don't get broken by changes to xla
|
||||
git checkout "$(cat ../.github/ci_commit_pins/xla.txt)"
|
||||
git submodule sync
|
||||
git submodule update --init --recursive
|
||||
popd
|
||||
fi
|
||||
}
|
||||
|
||||
function checkout_install_torchdeploy() {
|
||||
local commit
|
||||
commit=$(get_pinned_commit multipy)
|
||||
pushd ..
|
||||
git clone --recurse-submodules https://github.com/pytorch/multipy.git
|
||||
pushd multipy
|
||||
git checkout "${commit}"
|
||||
python multipy/runtime/example/generate_examples.py
|
||||
BUILD_CUDA_TESTS=1 pip install -e .
|
||||
popd
|
||||
popd
|
||||
}
|
||||
|
||||
function test_torch_deploy(){
|
||||
pushd ..
|
||||
pushd multipy
|
||||
./multipy/runtime/build/test_deploy
|
||||
./multipy/runtime/build/test_deploy_gpu
|
||||
popd
|
||||
popd
|
||||
}
|
||||
|
||||
function checkout_install_torchbench() {
|
||||
local commit
|
||||
commit=$(get_pinned_commit torchbench)
|
||||
git clone https://github.com/pytorch/benchmark torchbench
|
||||
pushd torchbench
|
||||
git checkout "$commit"
|
||||
|
||||
if [ "$1" ]; then
|
||||
python install.py --continue_on_fail models "$@"
|
||||
else
|
||||
# Occasionally the installation may fail on one model but it is ok to continue
|
||||
# to install and test other models
|
||||
python install.py --continue_on_fail
|
||||
fi
|
||||
popd
|
||||
}
|
||||
|
||||
function print_sccache_stats() {
|
||||
echo 'PyTorch Build Statistics'
|
||||
sccache --show-stats
|
||||
|
||||
if [[ -n "${OUR_GITHUB_JOB_ID}" ]]; then
|
||||
sccache --show-stats --stats-format json | jq .stats \
|
||||
> "sccache-stats-${BUILD_ENVIRONMENT}-${OUR_GITHUB_JOB_ID}.json"
|
||||
else
|
||||
echo "env var OUR_GITHUB_JOB_ID not set, will not write sccache stats to json"
|
||||
fi
|
||||
}
|
||||
@ -1,124 +0,0 @@
|
||||
from datetime import datetime, timedelta
|
||||
from tempfile import mkdtemp
|
||||
|
||||
from cryptography import x509
|
||||
from cryptography.hazmat.primitives import hashes, serialization
|
||||
from cryptography.hazmat.primitives.asymmetric import rsa
|
||||
from cryptography.x509.oid import NameOID
|
||||
|
||||
temp_dir = mkdtemp()
|
||||
print(temp_dir)
|
||||
|
||||
|
||||
def genrsa(path):
|
||||
key = rsa.generate_private_key(
|
||||
public_exponent=65537,
|
||||
key_size=2048,
|
||||
)
|
||||
with open(path, "wb") as f:
|
||||
f.write(
|
||||
key.private_bytes(
|
||||
encoding=serialization.Encoding.PEM,
|
||||
format=serialization.PrivateFormat.TraditionalOpenSSL,
|
||||
encryption_algorithm=serialization.NoEncryption(),
|
||||
)
|
||||
)
|
||||
return key
|
||||
|
||||
|
||||
def create_cert(path, C, ST, L, O, key):
|
||||
subject = issuer = x509.Name(
|
||||
[
|
||||
x509.NameAttribute(NameOID.COUNTRY_NAME, C),
|
||||
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST),
|
||||
x509.NameAttribute(NameOID.LOCALITY_NAME, L),
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, O),
|
||||
]
|
||||
)
|
||||
cert = (
|
||||
x509.CertificateBuilder()
|
||||
.subject_name(subject)
|
||||
.issuer_name(issuer)
|
||||
.public_key(key.public_key())
|
||||
.serial_number(x509.random_serial_number())
|
||||
.not_valid_before(datetime.utcnow())
|
||||
.not_valid_after(
|
||||
# Our certificate will be valid for 10 days
|
||||
datetime.utcnow()
|
||||
+ timedelta(days=10)
|
||||
)
|
||||
.add_extension(
|
||||
x509.BasicConstraints(ca=True, path_length=None),
|
||||
critical=True,
|
||||
)
|
||||
.sign(key, hashes.SHA256())
|
||||
)
|
||||
# Write our certificate out to disk.
|
||||
with open(path, "wb") as f:
|
||||
f.write(cert.public_bytes(serialization.Encoding.PEM))
|
||||
return cert
|
||||
|
||||
|
||||
def create_req(path, C, ST, L, O, key):
|
||||
csr = (
|
||||
x509.CertificateSigningRequestBuilder()
|
||||
.subject_name(
|
||||
x509.Name(
|
||||
[
|
||||
# Provide various details about who we are.
|
||||
x509.NameAttribute(NameOID.COUNTRY_NAME, C),
|
||||
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, ST),
|
||||
x509.NameAttribute(NameOID.LOCALITY_NAME, L),
|
||||
x509.NameAttribute(NameOID.ORGANIZATION_NAME, O),
|
||||
]
|
||||
)
|
||||
)
|
||||
.sign(key, hashes.SHA256())
|
||||
)
|
||||
with open(path, "wb") as f:
|
||||
f.write(csr.public_bytes(serialization.Encoding.PEM))
|
||||
return csr
|
||||
|
||||
|
||||
def sign_certificate_request(path, csr_cert, ca_cert, private_ca_key):
|
||||
cert = (
|
||||
x509.CertificateBuilder()
|
||||
.subject_name(csr_cert.subject)
|
||||
.issuer_name(ca_cert.subject)
|
||||
.public_key(csr_cert.public_key())
|
||||
.serial_number(x509.random_serial_number())
|
||||
.not_valid_before(datetime.utcnow())
|
||||
.not_valid_after(
|
||||
# Our certificate will be valid for 10 days
|
||||
datetime.utcnow()
|
||||
+ timedelta(days=10)
|
||||
# Sign our certificate with our private key
|
||||
)
|
||||
.sign(private_ca_key, hashes.SHA256())
|
||||
)
|
||||
with open(path, "wb") as f:
|
||||
f.write(cert.public_bytes(serialization.Encoding.PEM))
|
||||
return cert
|
||||
|
||||
|
||||
ca_key = genrsa(temp_dir + "/ca.key")
|
||||
ca_cert = create_cert(
|
||||
temp_dir + "/ca.pem",
|
||||
"US",
|
||||
"New York",
|
||||
"New York",
|
||||
"Gloo Certificate Authority",
|
||||
ca_key,
|
||||
)
|
||||
|
||||
pkey = genrsa(temp_dir + "/pkey.key")
|
||||
csr = create_req(
|
||||
temp_dir + "/csr.csr",
|
||||
"US",
|
||||
"California",
|
||||
"San Francisco",
|
||||
"Gloo Testing Company",
|
||||
pkey,
|
||||
)
|
||||
|
||||
cert = sign_certificate_request(temp_dir + "/cert.pem", csr, ca_cert, ca_key)
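# At this point temp_dir (printed above) holds the generated test PKI:
# ca.key and ca.pem (self-signed CA), pkey.key and csr.csr (leaf key and
# signing request), and cert.pem (the request signed by the CA).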
|
||||
@ -1,6 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
|
||||
docker build -t pytorch .
|
||||
@ -1,9 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
|
||||
echo "Testing pytorch docs"
|
||||
|
||||
cd docs
|
||||
make doctest
|
||||
@ -1,40 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This is where the local pytorch install in the docker image is located
|
||||
pt_checkout="/var/lib/jenkins/workspace"
|
||||
source "$pt_checkout/.ci/pytorch/common_utils.sh"
|
||||
echo "functorch_doc_push_script.sh: Invoked with $*"
|
||||
|
||||
set -ex
|
||||
|
||||
version=${DOCS_VERSION:-nightly}
|
||||
echo "version: $version"
|
||||
|
||||
# Build functorch docs
|
||||
pushd $pt_checkout/functorch/docs
|
||||
make html
|
||||
popd
|
||||
|
||||
git clone https://github.com/pytorch/functorch -b gh-pages --depth 1 functorch_ghpages
|
||||
pushd functorch_ghpages
|
||||
|
||||
if [ "$version" == "main" ]; then
|
||||
version=nightly
|
||||
fi
|
||||
|
||||
git rm -rf "$version" || true
|
||||
mv "$pt_checkout/functorch/docs/build/html" "$version"
|
||||
|
||||
git add "$version" || true
|
||||
git status
|
||||
git config user.email "soumith+bot@pytorch.org"
|
||||
git config user.name "pytorchbot"
|
||||
# If there aren't changes, don't make a commit; push is no-op
|
||||
git commit -m "Generate Python docs from pytorch/pytorch@${GITHUB_SHA}" || true
|
||||
git status
|
||||
|
||||
if [[ "${WITH_PUSH:-}" == true ]]; then
|
||||
git push -u origin gh-pages
|
||||
fi
|
||||
|
||||
popd
|
||||
@ -1,92 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# shellcheck disable=SC2034
|
||||
# shellcheck source=./macos-common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/macos-common.sh"
|
||||
# shellcheck source=./common-build.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common-build.sh"
|
||||
|
||||
# Build PyTorch
|
||||
if [ -z "${CI}" ]; then
|
||||
export DEVELOPER_DIR=/Applications/Xcode9.app/Contents/Developer
|
||||
fi
|
||||
|
||||
# This helper function wraps calls to binaries with sccache, but only if they're not already wrapped with sccache.
|
||||
# For example, `clang` will be `sccache clang`, but `sccache clang` will not become `sccache sccache clang`.
|
||||
# The way this is done is by detecting the command of the parent pid of the current process and checking whether
|
||||
# that is sccache, and wrapping sccache around the process if its parent is not already sccache.
|
||||
function write_sccache_stub() {
|
||||
output=$1
|
||||
binary=$(basename "${output}")
|
||||
|
||||
printf "#!/bin/sh\nif [ \$(ps auxc \$(ps auxc -o ppid \$\$ | grep \$\$ | rev | cut -d' ' -f1 | rev) | tr '\\\\n' ' ' | rev | cut -d' ' -f2 | rev) != sccache ]; then\n exec sccache %s \"\$@\"\nelse\n exec %s \"\$@\"\nfi" "$(which "${binary}")" "$(which "${binary}")" > "${output}"
|
||||
chmod a+x "${output}"
|
||||
}
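# For illustration, the stub generated for clang looks roughly like the
# simplified sketch below; the real parent-process check above is more
# involved than `ps -p $PPID -o comm=`, and /usr/bin/clang stands in for the
# resolved `which clang` path:
#   #!/bin/sh
#   if [ "$(ps -p $PPID -o comm=)" != sccache ]; then
#     exec sccache /usr/bin/clang "$@"
#   else
#     exec /usr/bin/clang "$@"
#   fi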
|
||||
|
||||
if which sccache > /dev/null; then
|
||||
# Create temp directory for sccache shims
|
||||
tmp_dir=$(mktemp -d)
|
||||
trap 'rm -rfv ${tmp_dir}' EXIT
|
||||
write_sccache_stub "${tmp_dir}/clang++"
|
||||
write_sccache_stub "${tmp_dir}/clang"
|
||||
|
||||
export PATH="${tmp_dir}:$PATH"
|
||||
fi
|
||||
|
||||
cross_compile_arm64() {
|
||||
# Cross compilation for arm64
|
||||
# Explicitly set USE_DISTRIBUTED=0 to align with the default build config on mac. This also serves as the sole CI config that tests
|
||||
# that building with USE_DISTRIBUTED=0 works at all. See https://github.com/pytorch/pytorch/issues/86448
|
||||
USE_DISTRIBUTED=0 CMAKE_OSX_ARCHITECTURES=arm64 MACOSX_DEPLOYMENT_TARGET=11.0 USE_MKLDNN=OFF USE_QNNPACK=OFF WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel
|
||||
}
|
||||
|
||||
compile_arm64() {
|
||||
# Compilation for arm64
|
||||
# TODO: Compile with OpenMP support (but this causes CI regressions as cross-compilation was done with OpenMP disabled)
|
||||
USE_DISTRIBUTED=0 USE_OPENMP=1 MACOSX_DEPLOYMENT_TARGET=11.0 WERROR=1 BUILD_TEST=OFF USE_PYTORCH_METAL=1 python setup.py bdist_wheel
|
||||
}
|
||||
|
||||
compile_x86_64() {
|
||||
USE_DISTRIBUTED=0 WERROR=1 python setup.py bdist_wheel --plat-name=macosx_10_9_x86_64
|
||||
}
|
||||
|
||||
build_lite_interpreter() {
|
||||
echo "Testing libtorch (lite interpreter)."
|
||||
|
||||
CPP_BUILD="$(pwd)/../cpp_build"
|
||||
# Ensure the removal of the tmp directory
|
||||
trap 'rm -rfv ${CPP_BUILD}' EXIT
|
||||
rm -rf "${CPP_BUILD}"
|
||||
mkdir -p "${CPP_BUILD}/caffe2"
|
||||
|
||||
# It looks like libtorch needs to be built in the "${CPP_BUILD}/caffe2" folder.
|
||||
BUILD_LIBTORCH_PY=$PWD/tools/build_libtorch.py
|
||||
pushd "${CPP_BUILD}/caffe2" || exit
|
||||
VERBOSE=1 DEBUG=1 python "${BUILD_LIBTORCH_PY}"
|
||||
popd || exit
|
||||
|
||||
"${CPP_BUILD}/caffe2/build/bin/test_lite_interpreter_runtime"
|
||||
}
|
||||
|
||||
print_cmake_info
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} = *arm64* ]]; then
|
||||
if [[ $(uname -m) == "arm64" ]]; then
|
||||
compile_arm64
|
||||
else
|
||||
cross_compile_arm64
|
||||
fi
|
||||
elif [[ ${BUILD_ENVIRONMENT} = *lite-interpreter* ]]; then
|
||||
export BUILD_LITE_INTERPRETER=1
|
||||
build_lite_interpreter
|
||||
else
|
||||
compile_x86_64
|
||||
fi
|
||||
|
||||
if which sccache > /dev/null; then
|
||||
print_sccache_stats
|
||||
fi
|
||||
|
||||
python tools/stats/export_test_times.py
|
||||
|
||||
assert_git_not_dirty
|
||||
@ -1,33 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Common prelude for macos-build.sh and macos-test.sh
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
|
||||
sysctl -a | grep machdep.cpu
|
||||
|
||||
# These are required for both the build job and the test job.
|
||||
# In the latter to test cpp extensions.
|
||||
export MACOSX_DEPLOYMENT_TARGET=11.0
|
||||
export CXX=clang++
|
||||
export CC=clang
|
||||
|
||||
print_cmake_info() {
|
||||
CMAKE_EXEC=$(which cmake)
|
||||
echo "$CMAKE_EXEC"
|
||||
|
||||
CONDA_INSTALLATION_DIR=$(dirname "$CMAKE_EXEC")
|
||||
# Print all libraries under cmake rpath for debugging
|
||||
ls -la "$CONDA_INSTALLATION_DIR/../lib"
|
||||
|
||||
export CMAKE_EXEC
|
||||
# Explicitly add conda env lib folder to cmake rpath to address the flaky issue
|
||||
# where cmake dependencies couldn't be found. This seems to point to how conda
|
||||
# links $CMAKE_EXEC to its package cache when cloning a new environment
|
||||
install_name_tool -add_rpath @executable_path/../lib "${CMAKE_EXEC}" || true
|
||||
# Adding the rpath invalidates the cmake signature, so re-sign it here to
|
||||
# trust the executable; otherwise it fails with EXC_BAD_ACCESS (SIGKILL
|
||||
# (Code Signature Invalid)) and exit code 137.
|
||||
codesign -f -s - "${CMAKE_EXEC}" || true
|
||||
}
|
||||
@ -1,167 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# shellcheck disable=SC2034
|
||||
# shellcheck source=./macos-common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/macos-common.sh"
|
||||
|
||||
if [[ -n "$CONDA_ENV" ]]; then
|
||||
# Use binaries under conda environment
|
||||
export PATH="$CONDA_ENV/bin":$PATH
|
||||
fi
|
||||
|
||||
# Test that OpenMP is enabled for non-arm64 build
|
||||
if [[ ${BUILD_ENVIRONMENT} != *arm64* ]]; then
|
||||
pushd test
|
||||
if [[ ! $(python -c "import torch; print(int(torch.backends.openmp.is_available()))") == "1" ]]; then
|
||||
echo "Build should have OpenMP enabled, but torch.backends.openmp.is_available() is False"
|
||||
exit 1
|
||||
fi
|
||||
popd
|
||||
fi
|
||||
|
||||
setup_test_python() {
|
||||
# The CircleCI worker hostname doesn't resolve to an address.
|
||||
# This environment variable makes ProcessGroupGloo default to
|
||||
# using the address associated with the loopback interface.
|
||||
export GLOO_SOCKET_IFNAME=lo0
|
||||
echo "Ninja version: $(ninja --version)"
|
||||
echo "Python version: $(which python) ($(python --version))"
|
||||
|
||||
# Increase default limit on open file handles from 256 to 1024
|
||||
ulimit -n 1024
|
||||
}
|
||||
|
||||
test_python_all() {
|
||||
setup_test_python
|
||||
|
||||
time python test/run_test.py --verbose --exclude-jit-executor
|
||||
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_python_shard() {
|
||||
if [[ -z "$NUM_TEST_SHARDS" ]]; then
|
||||
echo "NUM_TEST_SHARDS must be defined to run a Python test shard"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
setup_test_python
|
||||
|
||||
time python test/run_test.py --verbose --exclude-jit-executor --exclude-distributed-tests --shard "$1" "$NUM_TEST_SHARDS"
|
||||
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_libtorch() {
|
||||
# C++ API
|
||||
|
||||
if [[ "$BUILD_TEST_LIBTORCH" == "1" ]]; then
|
||||
# NB: Install outside of source directory (at the same level as the root
|
||||
# pytorch folder) so that it doesn't get cleaned away prior to docker push.
|
||||
# But still clean it before we perform our own build.
|
||||
|
||||
echo "Testing libtorch"
|
||||
|
||||
CPP_BUILD="$PWD/../cpp-build"
|
||||
rm -rf "$CPP_BUILD"
|
||||
mkdir -p "$CPP_BUILD"/caffe2
|
||||
|
||||
BUILD_LIBTORCH_PY=$PWD/tools/build_libtorch.py
|
||||
pushd "$CPP_BUILD"/caffe2
|
||||
VERBOSE=1 DEBUG=1 python "$BUILD_LIBTORCH_PY"
|
||||
popd
|
||||
|
||||
MNIST_DIR="${PWD}/test/cpp/api/mnist"
|
||||
python tools/download_mnist.py --quiet -d "${MNIST_DIR}"
|
||||
|
||||
# Unfortunately it seems like the test can't load from miniconda3
|
||||
# without these paths being set
|
||||
export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$PWD/miniconda3/lib"
|
||||
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$PWD/miniconda3/lib"
|
||||
TORCH_CPP_TEST_MNIST_PATH="${MNIST_DIR}" CPP_TESTS_DIR="${CPP_BUILD}/caffe2/bin" python test/run_test.py --cpp --verbose -i cpp/test_api
|
||||
|
||||
assert_git_not_dirty
|
||||
fi
|
||||
}
|
||||
|
||||
test_custom_backend() {
|
||||
print_cmake_info
|
||||
|
||||
echo "Testing custom backends"
|
||||
pushd test/custom_backend
|
||||
rm -rf build && mkdir build
|
||||
pushd build
|
||||
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
|
||||
CMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" "${CMAKE_EXEC}" ..
|
||||
make VERBOSE=1
|
||||
popd
|
||||
|
||||
# Run Python tests and export a lowered module.
|
||||
python test_custom_backend.py -v
|
||||
python backend.py --export-module-to=model.pt
|
||||
# Run C++ tests using the exported module.
|
||||
build/test_custom_backend ./model.pt
|
||||
rm -f ./model.pt
|
||||
popd
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_custom_script_ops() {
|
||||
print_cmake_info
|
||||
|
||||
echo "Testing custom script operators"
|
||||
pushd test/custom_operator
|
||||
# Build the custom operator library.
|
||||
rm -rf build && mkdir build
|
||||
pushd build
|
||||
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
|
||||
CMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" "${CMAKE_EXEC}" ..
|
||||
make VERBOSE=1
|
||||
popd
|
||||
|
||||
# Run tests Python-side and export a script module.
|
||||
python test_custom_ops.py -v
|
||||
python model.py --export-script-module=model.pt
|
||||
# Run tests C++-side and load the exported script module.
|
||||
build/test_custom_ops ./model.pt
|
||||
popd
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
test_jit_hooks() {
|
||||
print_cmake_info
|
||||
|
||||
echo "Testing jit hooks in cpp"
|
||||
pushd test/jit_hooks
|
||||
# Build the custom operator library.
|
||||
rm -rf build && mkdir build
|
||||
pushd build
|
||||
SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
|
||||
CMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" "${CMAKE_EXEC}" ..
|
||||
make VERBOSE=1
|
||||
popd
|
||||
|
||||
# Run tests Python-side and export a script module.
|
||||
python model.py --export-script-module=model
|
||||
# Run tests C++-side and load the exported script module.
|
||||
build/test_jit_hooks ./model
|
||||
popd
|
||||
assert_git_not_dirty
|
||||
}
|
||||
|
||||
if [[ $NUM_TEST_SHARDS -gt 1 ]]; then
|
||||
test_python_shard "${SHARD_NUMBER}"
|
||||
if [[ "${SHARD_NUMBER}" == 1 ]]; then
|
||||
test_libtorch
|
||||
test_custom_script_ops
|
||||
elif [[ "${SHARD_NUMBER}" == 2 ]]; then
|
||||
test_jit_hooks
|
||||
test_custom_backend
|
||||
fi
|
||||
else
|
||||
test_python_all
|
||||
test_libtorch
|
||||
test_custom_script_ops
|
||||
test_jit_hooks
|
||||
test_custom_backend
|
||||
fi
|
||||
@ -1,54 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Required environment variable: $BUILD_ENVIRONMENT
|
||||
# (This is set by default in the Docker images we build, so you don't
|
||||
# need to set it yourself.)
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
|
||||
echo "Testing pytorch"
|
||||
time python test/run_test.py --include test_cuda_multigpu test_cuda_primary_ctx --verbose
|
||||
|
||||
# Disabling tests to see if they solve timeout issues; see https://github.com/pytorch/pytorch/issues/70015
|
||||
# python tools/download_mnist.py --quiet -d test/cpp/api/mnist
|
||||
# OMP_NUM_THREADS=2 TORCH_CPP_TEST_MNIST_PATH="test/cpp/api/mnist" build/bin/test_api
|
||||
time python test/run_test.py --verbose -i distributed/test_c10d_common
|
||||
time python test/run_test.py --verbose -i distributed/test_c10d_gloo
|
||||
time python test/run_test.py --verbose -i distributed/test_c10d_nccl
|
||||
time python test/run_test.py --verbose -i distributed/test_c10d_spawn_gloo
|
||||
time python test/run_test.py --verbose -i distributed/test_c10d_spawn_nccl
|
||||
time python test/run_test.py --verbose -i distributed/test_store
|
||||
time python test/run_test.py --verbose -i distributed/test_pg_wrapper
|
||||
time python test/run_test.py --verbose -i distributed/rpc/cuda/test_tensorpipe_agent
|
||||
# FSDP tests
|
||||
for f in test/distributed/fsdp/*.py ; do time python test/run_test.py --verbose -i "${f#*/}" ; done
|
||||
# ShardedTensor tests
|
||||
time python test/run_test.py --verbose -i distributed/checkpoint/test_checkpoint
|
||||
time python test/run_test.py --verbose -i distributed/checkpoint/test_file_system_checkpoint
|
||||
time python test/run_test.py --verbose -i distributed/_shard/sharding_spec/test_sharding_spec
|
||||
time python test/run_test.py --verbose -i distributed/_shard/sharding_plan/test_sharding_plan
|
||||
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/test_sharded_tensor
|
||||
time python test/run_test.py --verbose -i distributed/_shard/sharded_tensor/test_sharded_tensor_reshard
|
||||
|
||||
# functional collective tests
|
||||
time python test/run_test.py --verbose -i distributed/test_functional_api
|
||||
|
||||
|
||||
# DTensor tests
|
||||
time python test/run_test.py --verbose -i distributed/_tensor/test_random_ops
|
||||
time python test/run_test.py --verbose -i distributed/_tensor/test_dtensor_compile
|
||||
|
||||
# DeviceMesh test
|
||||
time python test/run_test.py --verbose -i distributed/test_device_mesh
|
||||
|
||||
# DTensor/TP tests
|
||||
time python test/run_test.py --verbose -i distributed/tensor/parallel/test_ddp_2d_parallel
|
||||
time python test/run_test.py --verbose -i distributed/tensor/parallel/test_fsdp_2d_parallel
|
||||
time python test/run_test.py --verbose -i distributed/tensor/parallel/test_tp_examples
|
||||
|
||||
# Other tests
|
||||
time python test/run_test.py --verbose -i test_cuda_primary_ctx
|
||||
time python test/run_test.py --verbose -i test_optim -- -k optimizers_with_varying_tensors
|
||||
time python test/run_test.py --verbose -i test_foreach -- -k test_tensors_grouping
|
||||
assert_git_not_dirty
|
||||
@ -1,90 +0,0 @@
|
||||
import argparse
|
||||
import json
|
||||
import math
|
||||
import sys
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--test-name", dest="test_name", action="store", required=True, help="test name"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sample-stats",
|
||||
dest="sample_stats",
|
||||
action="store",
|
||||
required=True,
|
||||
help="stats from sample",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--update",
|
||||
action="store_true",
|
||||
help="whether to update baseline using stats from sample",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
test_name = args.test_name
|
||||
|
||||
if "cpu" in test_name:
|
||||
backend = "cpu"
|
||||
elif "gpu" in test_name:
|
||||
backend = "gpu"
|
||||
|
||||
data_file_path = f"../{backend}_runtime.json"
|
||||
|
||||
with open(data_file_path) as data_file:
|
||||
data = json.load(data_file)
|
||||
|
||||
if test_name in data:
|
||||
mean = float(data[test_name]["mean"])
|
||||
sigma = float(data[test_name]["sigma"])
|
||||
else:
|
||||
# Let the test pass if baseline number doesn't exist
|
||||
mean = sys.maxsize
|
||||
sigma = 0.001
|
||||
|
||||
print("population mean: ", mean)
|
||||
print("population sigma: ", sigma)
|
||||
|
||||
# Let the test pass if baseline number is NaN (which happened in
|
||||
# the past when we didn't have logic for catching NaN numbers)
|
||||
if math.isnan(mean) or math.isnan(sigma):
|
||||
mean = sys.maxsize
|
||||
sigma = 0.001
|
||||
|
||||
sample_stats_data = json.loads(args.sample_stats)
|
||||
|
||||
sample_mean = float(sample_stats_data["mean"])
|
||||
sample_sigma = float(sample_stats_data["sigma"])
|
||||
|
||||
print("sample mean: ", sample_mean)
|
||||
print("sample sigma: ", sample_sigma)
|
||||
|
||||
if math.isnan(sample_mean):
|
||||
raise Exception("""Error: sample mean is NaN""")
|
||||
elif math.isnan(sample_sigma):
|
||||
raise Exception("""Error: sample sigma is NaN""")
|
||||
|
||||
z_value = (sample_mean - mean) / sigma
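# Worked example with hypothetical numbers: a baseline mean of 10.0s with
# sigma 0.5s and a sample mean of 11.0s gives z = (11.0 - 10.0) / 0.5 = 2.0,
# which stays under the 3-sigma threshold below, so no regression is flagged.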
|
||||
|
||||
print("z-value: ", z_value)
|
||||
|
||||
if z_value >= 3:
|
||||
raise Exception(
|
||||
f"""\n
|
||||
z-value >= 3, there is a high chance of a perf regression.\n
|
||||
To reproduce this regression, run
|
||||
`cd .ci/pytorch/perf_test/ && bash {test_name}.sh` on your local machine
|
||||
and compare the runtime before/after your code change.
|
||||
"""
|
||||
)
|
||||
else:
|
||||
print("z-value < 3, no perf regression detected.")
|
||||
if args.update:
|
||||
print("We will use these numbers as new baseline.")
|
||||
new_data_file_path = f"../new_{backend}_runtime.json"
|
||||
with open(new_data_file_path) as new_data_file:
|
||||
new_data = json.load(new_data_file)
|
||||
new_data[test_name] = {}
|
||||
new_data[test_name]["mean"] = sample_mean
|
||||
new_data[test_name]["sigma"] = max(sample_sigma, sample_mean * 0.1)
|
||||
with open(new_data_file_path, "w") as new_data_file:
|
||||
json.dump(new_data, new_data_file, indent=4)
|
||||
@ -1,13 +0,0 @@
|
||||
import json
|
||||
import sys
|
||||
|
||||
data_file_path = sys.argv[1]
|
||||
commit_hash = sys.argv[2]
|
||||
|
||||
with open(data_file_path) as data_file:
|
||||
data = json.load(data_file)
|
||||
|
||||
data["commit"] = commit_hash
|
||||
|
||||
with open(data_file_path, "w") as data_file:
|
||||
json.dump(data, data_file)
|
||||
@ -1,17 +0,0 @@
|
||||
import sys
|
||||
|
||||
log_file_path = sys.argv[1]
|
||||
|
||||
with open(log_file_path) as f:
|
||||
lines = f.readlines()
|
||||
|
||||
for line in lines:
|
||||
# Ignore errors from CPU instruction set checks, symbol-existence testing,
|
||||
# or compilation error formatting
|
||||
ignored_keywords = [
|
||||
"src.c",
|
||||
"CheckSymbolExists.c",
|
||||
"test_compilation_error_formatting",
|
||||
]
|
||||
if all(keyword not in line for keyword in ignored_keywords):
|
||||
print(line)
|
||||
@ -1,71 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
SCRIPT_PARENT_DIR=$(dirname "${BASH_SOURCE[0]}")
|
||||
|
||||
# shellcheck source=.ci/pytorch/common.sh
|
||||
source "$SCRIPT_PARENT_DIR/common.sh"
|
||||
|
||||
cd .ci/pytorch/perf_test
|
||||
|
||||
echo "Running CPU perf test for PyTorch..."
|
||||
|
||||
pip install -q awscli
|
||||
|
||||
# Set multipart_threshold to be sufficiently high, so that `aws s3 cp` is not a multipart read
|
||||
# More info at https://github.com/aws/aws-cli/issues/2321
|
||||
aws configure set default.s3.multipart_threshold 5GB
|
||||
UPSTREAM_DEFAULT_BRANCH="$(git remote show https://github.com/pytorch/pytorch.git | awk '/HEAD branch/ {print $NF}')"
|
||||
|
||||
if [[ "$COMMIT_SOURCE" == "$UPSTREAM_DEFAULT_BRANCH" ]]; then
|
||||
# Get current default branch commit hash
|
||||
DEFAULT_BRANCH_COMMIT_ID=$(git log --format="%H" -n 1)
|
||||
export DEFAULT_BRANCH_COMMIT_ID
|
||||
fi
|
||||
|
||||
# Find the default branch commit to test against
|
||||
git remote add upstream https://github.com/pytorch/pytorch.git
|
||||
git fetch upstream
|
||||
IFS=$'\n'
|
||||
while IFS='' read -r commit_id; do
|
||||
if aws s3 ls s3://ossci-perf-test/pytorch/cpu_runtime/"${commit_id}".json; then
|
||||
LATEST_TESTED_COMMIT=${commit_id}
|
||||
break
|
||||
fi
|
||||
done < <(git rev-list upstream/"$UPSTREAM_DEFAULT_BRANCH")
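# LATEST_TESTED_COMMIT is now the newest upstream commit that has a baseline
# JSON in S3; that file is downloaded below and used as the comparison baseline.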
|
||||
aws s3 cp s3://ossci-perf-test/pytorch/cpu_runtime/"${LATEST_TESTED_COMMIT}".json cpu_runtime.json
|
||||
|
||||
if [[ "$COMMIT_SOURCE" == "$UPSTREAM_DEFAULT_BRANCH" ]]; then
|
||||
# Prepare new baseline file
|
||||
cp cpu_runtime.json new_cpu_runtime.json
|
||||
python update_commit_hash.py new_cpu_runtime.json "${DEFAULT_BRANCH_COMMIT_ID}"
|
||||
fi
|
||||
|
||||
# Include tests
|
||||
# shellcheck source=./perf_test/test_cpu_speed_mini_sequence_labeler.sh
|
||||
. ./test_cpu_speed_mini_sequence_labeler.sh
|
||||
# shellcheck source=./perf_test/test_cpu_speed_mnist.sh
|
||||
. ./test_cpu_speed_mnist.sh
|
||||
# shellcheck source=./perf_test/test_cpu_speed_torch.sh
|
||||
. ./test_cpu_speed_torch.sh
|
||||
# shellcheck source=./perf_test/test_cpu_speed_torch_tensor.sh
|
||||
. ./test_cpu_speed_torch_tensor.sh
|
||||
|
||||
# Run tests
|
||||
export TEST_MODE="compare_with_baseline"
|
||||
if [[ "$COMMIT_SOURCE" == "$UPSTREAM_DEFAULT_BRANCH" ]]; then
|
||||
export TEST_MODE="compare_and_update"
|
||||
fi
|
||||
|
||||
# Operator tests
|
||||
run_test test_cpu_speed_torch ${TEST_MODE}
|
||||
run_test test_cpu_speed_torch_tensor ${TEST_MODE}
|
||||
|
||||
# Sample model tests
|
||||
run_test test_cpu_speed_mini_sequence_labeler 20 ${TEST_MODE}
|
||||
run_test test_cpu_speed_mnist 20 ${TEST_MODE}
|
||||
|
||||
if [[ "$COMMIT_SOURCE" == "$UPSTREAM_DEFAULT_BRANCH" ]]; then
|
||||
# This could cause a race condition if we are testing the same default branch commit twice,
|
||||
# but the chance of them executing this line at the same time is low.
|
||||
aws s3 cp new_cpu_runtime.json s3://ossci-perf-test/pytorch/cpu_runtime/"${DEFAULT_BRANCH_COMMIT_ID}".json --acl public-read
|
||||
fi
|
||||
@ -1,76 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# shellcheck source=./common.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
|
||||
|
||||
pushd .ci/pytorch/perf_test
|
||||
|
||||
echo "Running GPU perf test for PyTorch..."
|
||||
|
||||
# Trying to uninstall PyYAML can cause problems. Workaround according to:
|
||||
# https://github.com/pypa/pip/issues/5247#issuecomment-415571153
|
||||
pip install -q awscli --ignore-installed PyYAML
|
||||
|
||||
# Set multipart_threshold to be sufficiently high, so that `aws s3 cp` is not a multipart read
|
||||
# More info at https://github.com/aws/aws-cli/issues/2321
|
||||
aws configure set default.s3.multipart_threshold 5GB
|
||||
UPSTREAM_DEFAULT_BRANCH="$(git remote show https://github.com/pytorch/pytorch.git | awk '/HEAD branch/ {print $NF}')"
|
||||
|
||||
if [[ "$COMMIT_SOURCE" == "$UPSTREAM_DEFAULT_BRANCH" ]]; then
|
||||
# Get current default branch commit hash
|
||||
DEFAULT_BRANCH_COMMIT_ID=$(git log --format="%H" -n 1)
|
||||
export DEFAULT_BRANCH_COMMIT_ID
|
||||
fi
|
||||
|
||||
# Find the default branch commit to test against
|
||||
git remote add upstream https://github.com/pytorch/pytorch.git
|
||||
git fetch upstream
|
||||
IFS=$'\n'
|
||||
while IFS='' read -r commit_id; do
|
||||
if aws s3 ls s3://ossci-perf-test/pytorch/gpu_runtime/"${commit_id}".json; then
|
||||
LATEST_TESTED_COMMIT=${commit_id}
|
||||
break
|
||||
fi
|
||||
done < <(git rev-list upstream/"$UPSTREAM_DEFAULT_BRANCH")
|
||||
aws s3 cp s3://ossci-perf-test/pytorch/gpu_runtime/"${LATEST_TESTED_COMMIT}".json gpu_runtime.json
|
||||
|
||||
if [[ "$COMMIT_SOURCE" == "$UPSTREAM_DEFAULT_BRANCH" ]]; then
|
||||
# Prepare new baseline file
|
||||
cp gpu_runtime.json new_gpu_runtime.json
|
||||
python update_commit_hash.py new_gpu_runtime.json "${DEFAULT_BRANCH_COMMIT_ID}"
|
||||
fi
|
||||
|
||||
# Include tests
|
||||
# shellcheck source=./perf_test/test_gpu_speed_mnist.sh
|
||||
. ./test_gpu_speed_mnist.sh
|
||||
# shellcheck source=./perf_test/test_gpu_speed_word_language_model.sh
|
||||
. ./test_gpu_speed_word_language_model.sh
|
||||
# shellcheck source=./perf_test/test_gpu_speed_cudnn_lstm.sh
|
||||
. ./test_gpu_speed_cudnn_lstm.sh
|
||||
# shellcheck source=./perf_test/test_gpu_speed_lstm.sh
|
||||
. ./test_gpu_speed_lstm.sh
|
||||
# shellcheck source=./perf_test/test_gpu_speed_mlstm.sh
|
||||
. ./test_gpu_speed_mlstm.sh
|
||||
|
||||
# Run tests
|
||||
if [[ "$COMMIT_SOURCE" == "$UPSTREAM_DEFAULT_BRANCH" ]]; then
|
||||
run_test test_gpu_speed_mnist 20 compare_and_update
|
||||
run_test test_gpu_speed_word_language_model 20 compare_and_update
|
||||
run_test test_gpu_speed_cudnn_lstm 20 compare_and_update
|
||||
run_test test_gpu_speed_lstm 20 compare_and_update
|
||||
run_test test_gpu_speed_mlstm 20 compare_and_update
|
||||
else
|
||||
run_test test_gpu_speed_mnist 20 compare_with_baseline
|
||||
run_test test_gpu_speed_word_language_model 20 compare_with_baseline
|
||||
run_test test_gpu_speed_cudnn_lstm 20 compare_with_baseline
|
||||
run_test test_gpu_speed_lstm 20 compare_with_baseline
|
||||
run_test test_gpu_speed_mlstm 20 compare_with_baseline
|
||||
fi
|
||||
|
||||
if [[ "$COMMIT_SOURCE" == "$UPSTREAM_DEFAULT_BRANCH" ]]; then
|
||||
# This could cause race condition if we are testing the same default branch commit twice,
|
||||
# but the chance of them executing this line at the same time is low.
|
||||
aws s3 cp new_gpu_runtime.json s3://ossci-perf-test/pytorch/gpu_runtime/"${DEFAULT_BRANCH_COMMIT_ID}".json --acl public-read
|
||||
fi
|
||||
|
||||
popd
|
||||
1141
.ci/pytorch/test.sh
File diff suppressed because it is too large
@ -1,47 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# If you want to rebuild, run this with REBUILD=1
|
||||
# If you want to build with CUDA, run this with USE_CUDA=1
|
||||
# If you want to build without CUDA, run this with USE_CUDA=0
|
||||
|
||||
if [ ! -f setup.py ]; then
|
||||
echo "ERROR: Please run this build script from PyTorch root directory."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SCRIPT_PARENT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
# shellcheck source=./common.sh
|
||||
source "$SCRIPT_PARENT_DIR/common.sh"
|
||||
# shellcheck source=./common-build.sh
|
||||
source "$SCRIPT_PARENT_DIR/common-build.sh"
|
||||
|
||||
export TMP_DIR="${PWD}/build/win_tmp"
|
||||
TMP_DIR_WIN=$(cygpath -w "${TMP_DIR}")
|
||||
export TMP_DIR_WIN
|
||||
export PYTORCH_FINAL_PACKAGE_DIR=${PYTORCH_FINAL_PACKAGE_DIR:-/c/w/build-results}
|
||||
if [[ -n "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then
|
||||
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true
|
||||
fi
|
||||
|
||||
export SCRIPT_HELPERS_DIR=$SCRIPT_PARENT_DIR/win-test-helpers
|
||||
|
||||
set +ex
|
||||
grep -E -R 'PyLong_(From|As)(Unsigned|)Long\(' --exclude=python_numbers.h --exclude=eval_frame.c torch/
|
||||
PYLONG_API_CHECK=$?
|
||||
if [[ $PYLONG_API_CHECK == 0 ]]; then
|
||||
echo "Usage of PyLong_{From,As}{Unsigned}Long API may lead to overflow errors on Windows"
|
||||
echo "because \`sizeof(long) == 4\` and \`sizeof(unsigned long) == 4\`."
|
||||
echo "Please include \"torch/csrc/utils/python_numbers.h\" and use the correspoding APIs instead."
|
||||
echo "PyLong_FromLong -> THPUtils_packInt32 / THPUtils_packInt64"
|
||||
echo "PyLong_AsLong -> THPUtils_unpackInt (32-bit) / THPUtils_unpackLong (64-bit)"
|
||||
echo "PyLong_FromUnsignedLong -> THPUtils_packUInt32 / THPUtils_packUInt64"
|
||||
echo "PyLong_AsUnsignedLong -> THPUtils_unpackUInt32 / THPUtils_unpackUInt64"
|
||||
exit 1
|
||||
fi
|
||||
set -ex
|
||||
|
||||
"$SCRIPT_HELPERS_DIR"/build_pytorch.bat
|
||||
|
||||
assert_git_not_dirty
|
||||
|
||||
echo "BUILD PASSED"
|
||||
@ -1,138 +0,0 @@
|
||||
if "%DEBUG%" == "1" (
|
||||
set BUILD_TYPE=debug
|
||||
) ELSE (
|
||||
set BUILD_TYPE=release
|
||||
)
|
||||
|
||||
set PATH=C:\Program Files\CMake\bin;C:\Program Files\7-Zip;C:\ProgramData\chocolatey\bin;C:\Program Files\Git\cmd;C:\Program Files\Amazon\AWSCLI;C:\Program Files\Amazon\AWSCLI\bin;%PATH%
|
||||
|
||||
:: This inflates our log size slightly, but it is REALLY useful to be
|
||||
:: able to see what our cl.exe commands are (since you can actually
|
||||
:: just copy-paste them into a local Windows setup to just rebuild a
|
||||
:: single file.)
|
||||
:: log sizes are too long, but leaving this here in case someone wants to use it locally
|
||||
:: set CMAKE_VERBOSE_MAKEFILE=1
|
||||
|
||||
|
||||
set INSTALLER_DIR=%SCRIPT_HELPERS_DIR%\installation-helpers
|
||||
|
||||
|
||||
call %INSTALLER_DIR%\install_mkl.bat
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
call %INSTALLER_DIR%\install_magma.bat
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
call %INSTALLER_DIR%\install_sccache.bat
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
:: Miniconda has been installed as part of the Windows AMI with all the dependencies.
|
||||
:: We just need to activate it here
|
||||
call %INSTALLER_DIR%\activate_miniconda3.bat
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
:: Override VS env here
|
||||
pushd .
|
||||
if "%VC_VERSION%" == "" (
|
||||
call "C:\Program Files (x86)\Microsoft Visual Studio\%VC_YEAR%\%VC_PRODUCT%\VC\Auxiliary\Build\vcvarsall.bat" x64
|
||||
) else (
|
||||
call "C:\Program Files (x86)\Microsoft Visual Studio\%VC_YEAR%\%VC_PRODUCT%\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=%VC_VERSION%
|
||||
)
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
@echo on
|
||||
popd
|
||||
|
||||
if not "%USE_CUDA%"=="1" goto cuda_build_end
|
||||
|
||||
set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION%
|
||||
|
||||
rem version transformer, for example 10.1 to 10_1.
|
||||
if x%CUDA_VERSION:.=%==x%CUDA_VERSION% (
|
||||
echo CUDA version %CUDA_VERSION% format isn't correct: it doesn't contain '.'
|
||||
exit /b 1
|
||||
)
|
||||
set VERSION_SUFFIX=%CUDA_VERSION:.=_%
|
||||
set CUDA_PATH_V%VERSION_SUFFIX%=%CUDA_PATH%
|
||||
|
||||
set CUDNN_LIB_DIR=%CUDA_PATH%\lib\x64
|
||||
set CUDA_TOOLKIT_ROOT_DIR=%CUDA_PATH%
|
||||
set CUDNN_ROOT_DIR=%CUDA_PATH%
|
||||
set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
|
||||
set PATH=%CUDA_PATH%\bin;%CUDA_PATH%\libnvvp;%PATH%
|
||||
|
||||
|
||||
:cuda_build_end
|
||||
|
||||
set DISTUTILS_USE_SDK=1
|
||||
set PATH=%TMP_DIR_WIN%\bin;%PATH%
|
||||
|
||||
:: The latest Windows CUDA test is running on AWS G5 runner with A10G GPU
|
||||
if "%TORCH_CUDA_ARCH_LIST%" == "" set TORCH_CUDA_ARCH_LIST=8.6
|
||||
|
||||
:: The default sccache idle timeout is 600, which is too short and leads to intermittent build errors.
|
||||
set SCCACHE_IDLE_TIMEOUT=0
|
||||
set SCCACHE_IGNORE_SERVER_IO_ERROR=1
|
||||
sccache --stop-server
|
||||
sccache --start-server
|
||||
sccache --zero-stats
|
||||
set CC=sccache-cl
|
||||
set CXX=sccache-cl
|
||||
|
||||
set CMAKE_GENERATOR=Ninja
|
||||
|
||||
if "%USE_CUDA%"=="1" (
|
||||
:: randomtemp is used to resolve the intermittent build error related to CUDA.
|
||||
:: code: https://github.com/peterjc123/randomtemp-rust
|
||||
:: issue: https://github.com/pytorch/pytorch/issues/25393
|
||||
::
|
||||
:: CMake requires a single command as CUDA_NVCC_EXECUTABLE, so we push the wrappers
|
||||
:: randomtemp.exe and sccache.exe into a batch file which CMake invokes.
|
||||
curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.4/randomtemp.exe --output %TMP_DIR_WIN%\bin\randomtemp.exe
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
echo @"%TMP_DIR_WIN%\bin\randomtemp.exe" "%TMP_DIR_WIN%\bin\sccache.exe" "%CUDA_PATH%\bin\nvcc.exe" %%* > "%TMP_DIR%/bin/nvcc.bat"
|
||||
cat %TMP_DIR%/bin/nvcc.bat
|
||||
set CUDA_NVCC_EXECUTABLE=%TMP_DIR%/bin/nvcc.bat
|
||||
for /F "usebackq delims=" %%n in (`cygpath -m "%CUDA_PATH%\bin\nvcc.exe"`) do set CMAKE_CUDA_COMPILER=%%n
|
||||
set CMAKE_CUDA_COMPILER_LAUNCHER=%TMP_DIR%/bin/randomtemp.exe;%TMP_DIR%\bin\sccache.exe
|
||||
)
|
||||
|
||||
:: Print all existing environment variables for debugging
|
||||
set
|
||||
|
||||
python setup.py bdist_wheel
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
sccache --show-stats
|
||||
python -c "import os, glob; os.system('python -mpip install --no-index --no-deps ' + glob.glob('dist/*.whl')[0])"
|
||||
(
|
||||
if "%BUILD_ENVIRONMENT%"=="" (
|
||||
echo NOTE: To run `import torch`, please make sure to activate the conda environment by running `call %CONDA_PARENT_DIR%\Miniconda3\Scripts\activate.bat %CONDA_PARENT_DIR%\Miniconda3` in Command Prompt before running Git Bash.
|
||||
) else (
|
||||
copy /Y "dist\*.whl" "%PYTORCH_FINAL_PACKAGE_DIR%"
|
||||
|
||||
:: export test times so that potential sharded tests that'll branch off this build will use consistent data
|
||||
python tools/stats/export_test_times.py
|
||||
robocopy /E ".additional_ci_files" "%PYTORCH_FINAL_PACKAGE_DIR%\.additional_ci_files"
|
||||
|
||||
:: Also save build/.ninja_log as an artifact
|
||||
copy /Y "build\.ninja_log" "%PYTORCH_FINAL_PACKAGE_DIR%\"
|
||||
)
|
||||
)
|
||||
|
||||
sccache --show-stats --stats-format json | jq .stats > sccache-stats-%BUILD_ENVIRONMENT%-%OUR_GITHUB_JOB_ID%.json
|
||||
sccache --stop-server
|
||||
@ -1,26 +0,0 @@
|
||||
if "%BUILD_ENVIRONMENT%"=="" (
|
||||
set CONDA_PARENT_DIR=%CD%
|
||||
) else (
|
||||
set CONDA_PARENT_DIR=C:\Jenkins
|
||||
)
|
||||
|
||||
|
||||
:: Be conservative here when rolling out the new AMI with conda. This will try
|
||||
:: to install conda as before if it couldn't find the conda installation. This
|
||||
:: can be removed eventually after we gain enough confidence in the AMI
|
||||
if not exist %CONDA_PARENT_DIR%\Miniconda3 (
|
||||
set INSTALL_FRESH_CONDA=1
|
||||
)
|
||||
|
||||
if "%INSTALL_FRESH_CONDA%"=="1" (
|
||||
curl --retry 3 --retry-all-errors -k https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe --output %TMP_DIR_WIN%\Miniconda3-latest-Windows-x86_64.exe
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
%TMP_DIR_WIN%\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_PARENT_DIR%\Miniconda3
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
)
|
||||
|
||||
:: Activate conda so that we can use its commands, i.e. conda, python, pip
|
||||
call %CONDA_PARENT_DIR%\Miniconda3\Scripts\activate.bat %CONDA_PARENT_DIR%\Miniconda3
|
||||
@ -1,18 +0,0 @@
|
||||
mkdir %TMP_DIR_WIN%\bin
|
||||
|
||||
if "%REBUILD%"=="" (
|
||||
:check_sccache
|
||||
%TMP_DIR_WIN%\bin\sccache.exe --show-stats || (
|
||||
taskkill /im sccache.exe /f /t || ver > nul
|
||||
del %TMP_DIR_WIN%\bin\sccache.exe || ver > nul
|
||||
del %TMP_DIR_WIN%\bin\sccache-cl.exe || ver > nul
|
||||
if "%BUILD_ENVIRONMENT%"=="" (
|
||||
curl --retry 3 --retry-all-errors -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output %TMP_DIR_WIN%\bin\sccache.exe
|
||||
curl --retry 3 --retry-all-errors -k https://s3.amazonaws.com/ossci-windows/sccache-cl.exe --output %TMP_DIR_WIN%\bin\sccache-cl.exe
|
||||
) else (
|
||||
aws s3 cp s3://ossci-windows/sccache.exe %TMP_DIR_WIN%\bin\sccache.exe
|
||||
aws s3 cp s3://ossci-windows/sccache-cl.exe %TMP_DIR_WIN%\bin\sccache-cl.exe
|
||||
)
|
||||
goto :check_sccache
|
||||
)
|
||||
)
|
||||
@ -1,54 +0,0 @@
|
||||
set PATH=C:\Program Files\CMake\bin;C:\Program Files\7-Zip;C:\ProgramData\chocolatey\bin;C:\Program Files\Git\cmd;C:\Program Files\Amazon\AWSCLI;C:\Program Files\Amazon\AWSCLI\bin;%PATH%
|
||||
|
||||
:: Install Miniconda3
|
||||
set INSTALLER_DIR=%SCRIPT_HELPERS_DIR%\installation-helpers
|
||||
|
||||
:: Miniconda has been installed as part of the Windows AMI with all the dependencies.
|
||||
:: We just need to activate it here
|
||||
call %INSTALLER_DIR%\activate_miniconda3.bat
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
|
||||
:: PyTorch is now installed using the standard wheel on Windows into the conda environment.
|
||||
:: However, the test scripts are still frequently referring to the workspace temp directory
|
||||
:: build\torch. Rather than changing all these references, making a copy of the torch folder
|
||||
:: from conda to the current workspace is easier. The workspace will be cleaned up after
|
||||
:: the job anyway
|
||||
xcopy /s %CONDA_PARENT_DIR%\Miniconda3\Lib\site-packages\torch %TMP_DIR_WIN%\build\torch\
|
||||
|
||||
pushd .
|
||||
if "%VC_VERSION%" == "" (
|
||||
call "C:\Program Files (x86)\Microsoft Visual Studio\%VC_YEAR%\%VC_PRODUCT%\VC\Auxiliary\Build\vcvarsall.bat" x64
|
||||
) else (
|
||||
call "C:\Program Files (x86)\Microsoft Visual Studio\%VC_YEAR%\%VC_PRODUCT%\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=%VC_VERSION%
|
||||
)
|
||||
if errorlevel 1 exit /b
|
||||
if not errorlevel 0 exit /b
|
||||
@echo on
|
||||
popd
|
||||
|
||||
set DISTUTILS_USE_SDK=1
|
||||
|
||||
if not "%USE_CUDA%"=="1" goto cuda_build_end
|
||||
|
||||
set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION%
|
||||
|
||||
rem version transformer, for example 10.1 to 10_1.
|
||||
set VERSION_SUFFIX=%CUDA_VERSION:.=_%
|
||||
set CUDA_PATH_V%VERSION_SUFFIX%=%CUDA_PATH%
|
||||
|
||||
set CUDNN_LIB_DIR=%CUDA_PATH%\lib\x64
|
||||
set CUDA_TOOLKIT_ROOT_DIR=%CUDA_PATH%
|
||||
set CUDNN_ROOT_DIR=%CUDA_PATH%
|
||||
set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
|
||||
set PATH=%CUDA_PATH%\bin;%CUDA_PATH%\libnvvp;%PATH%
|
||||
set NUMBAPRO_CUDALIB=%CUDA_PATH%\bin
|
||||
set NUMBAPRO_LIBDEVICE=%CUDA_PATH%\nvvm\libdevice
|
||||
set NUMBAPRO_NVVM=%CUDA_PATH%\nvvm\bin\nvvm64_32_0.dll
|
||||
|
||||
:cuda_build_end
|
||||
|
||||
set PYTHONPATH=%TMP_DIR_WIN%\build;%PYTHONPATH%
|
||||
|
||||
:: Print all existing environment variables for debugging
|
||||
set
|
||||
@ -1,54 +0,0 @@
|
||||
call %SCRIPT_HELPERS_DIR%\setup_pytorch_env.bat
|
||||
if errorlevel 1 exit /b 1
|
||||
|
||||
:: Save the current working directory so that we can go back there
|
||||
set CWD=%cd%
|
||||
|
||||
set CPP_TESTS_DIR=%TMP_DIR_WIN%\build\torch\bin
|
||||
set PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64;%TMP_DIR_WIN%\build\torch\lib;%PATH%
|
||||
|
||||
set TORCH_CPP_TEST_MNIST_PATH=%CWD%\test\cpp\api\mnist
|
||||
python tools\download_mnist.py --quiet -d %TORCH_CPP_TEST_MNIST_PATH%
|
||||
|
||||
python test\run_test.py --cpp --verbose -i cpp/test_api
|
||||
if errorlevel 1 exit /b 1
|
||||
if not errorlevel 0 exit /b 1
|
||||
|
||||
cd %TMP_DIR_WIN%\build\torch\test
|
||||
for /r "." %%a in (*.exe) do (
|
||||
call :libtorch_check "%%~na" "%%~fa"
|
||||
if errorlevel 1 goto fail
|
||||
)
|
||||
|
||||
goto :eof
|
||||
|
||||
:libtorch_check
|
||||
|
||||
cd %CWD%
|
||||
set CPP_TESTS_DIR=%TMP_DIR_WIN%\build\torch\test
|
||||
|
||||
:: Skip verify_api_visibility as it is a compile-level test
|
||||
if "%~1" == "verify_api_visibility" goto :eof
|
||||
|
||||
echo Running "%~2"
|
||||
if "%~1" == "c10_intrusive_ptr_benchmark" (
|
||||
:: NB: This is not a gtest executable file, so it can't be handled by pytest-cpp
|
||||
call "%~2"
|
||||
goto :eof
|
||||
)
|
||||
|
||||
python test\run_test.py --cpp --verbose -i "cpp/%~1"
|
||||
if errorlevel 1 (
|
||||
echo %1 failed with exit code %errorlevel%
|
||||
goto fail
|
||||
)
|
||||
if not errorlevel 0 (
|
||||
echo %1 failed with exit code %errorlevel%
|
||||
goto fail
|
||||
)
|
||||
|
||||
:eof
|
||||
exit /b 0
|
||||
|
||||
:fail
|
||||
exit /b 1
|
||||
@ -1,12 +0,0 @@
|
||||
call %SCRIPT_HELPERS_DIR%\setup_pytorch_env.bat
|
||||
|
||||
echo Copying over test times file
|
||||
robocopy /E "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.additional_ci_files" "%PROJECT_DIR_WIN%\.additional_ci_files"
|
||||
|
||||
pushd test
|
||||
|
||||
echo Run jit_profiling tests
|
||||
python run_test.py --include test_jit_legacy test_jit_fuser_legacy --verbose
|
||||
if ERRORLEVEL 1 exit /b 1
|
||||
|
||||
popd
|
||||
@ -1,37 +0,0 @@
|
||||
call %SCRIPT_HELPERS_DIR%\setup_pytorch_env.bat
|
||||
:: exit the batch once there's an error
|
||||
if not errorlevel 0 (
|
||||
echo "setup pytorch env failed"
|
||||
echo %errorlevel%
|
||||
exit /b
|
||||
)
|
||||
|
||||
pushd test
|
||||
|
||||
set GFLAGS_EXE="C:\Program Files (x86)\Windows Kits\10\Debuggers\x64\gflags.exe"
|
||||
if "%SHARD_NUMBER%" == "1" (
|
||||
if exist %GFLAGS_EXE% (
|
||||
echo Some smoke tests
|
||||
%GFLAGS_EXE% /i python.exe +sls
|
||||
python %SCRIPT_HELPERS_DIR%\run_python_nn_smoketests.py
|
||||
if ERRORLEVEL 1 goto fail
|
||||
|
||||
%GFLAGS_EXE% /i python.exe -sls
|
||||
if ERRORLEVEL 1 goto fail
|
||||
)
|
||||
)
|
||||
|
||||
echo Copying over test times file
|
||||
robocopy /E "%PYTORCH_FINAL_PACKAGE_DIR_WIN%\.additional_ci_files" "%PROJECT_DIR_WIN%\.additional_ci_files"
|
||||
|
||||
echo Run nn tests
|
||||
python run_test.py --exclude-jit-executor --exclude-distributed-tests --shard "%SHARD_NUMBER%" "%NUM_TEST_SHARDS%" --verbose
|
||||
if ERRORLEVEL 1 goto fail
|
||||
|
||||
popd
|
||||
|
||||
:eof
|
||||
exit /b 0
|
||||
|
||||
:fail
|
||||
exit /b 1
|
||||
@ -1,73 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
SCRIPT_PARENT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
|
||||
# shellcheck source=./common.sh
|
||||
source "$SCRIPT_PARENT_DIR/common.sh"
|
||||
|
||||
export TMP_DIR="${PWD}/build/win_tmp"
|
||||
TMP_DIR_WIN=$(cygpath -w "${TMP_DIR}")
|
||||
export TMP_DIR_WIN
|
||||
export PROJECT_DIR="${PWD}"
|
||||
PROJECT_DIR_WIN=$(cygpath -w "${PROJECT_DIR}")
|
||||
export PROJECT_DIR_WIN
|
||||
export TEST_DIR="${PWD}/test"
|
||||
TEST_DIR_WIN=$(cygpath -w "${TEST_DIR}")
|
||||
export TEST_DIR_WIN
|
||||
export PYTORCH_FINAL_PACKAGE_DIR="${PYTORCH_FINAL_PACKAGE_DIR:-/c/w/build-results}"
|
||||
PYTORCH_FINAL_PACKAGE_DIR_WIN=$(cygpath -w "${PYTORCH_FINAL_PACKAGE_DIR}")
|
||||
export PYTORCH_FINAL_PACKAGE_DIR_WIN
|
||||
|
||||
mkdir -p "$TMP_DIR"/build/torch
|
||||
|
||||
export SCRIPT_HELPERS_DIR=$SCRIPT_PARENT_DIR/win-test-helpers
|
||||
|
||||
if [[ "$TEST_CONFIG" = "force_on_cpu" ]]; then
|
||||
# run the full test suite for force_on_cpu test
|
||||
export USE_CUDA=0
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
|
||||
# Used so that only cuda/rocm specific versions of tests are generated
|
||||
# mainly used so that we're not spending extra cycles testing cpu
|
||||
# devices on expensive gpu machines
|
||||
export PYTORCH_TESTING_DEVICE_ONLY_FOR="cuda"
|
||||
fi
|
||||
|
||||
# TODO: Move both of them to Windows AMI
|
||||
python -m pip install pytest-rerunfailures==10.3 pytest-cpp==2.3.0 tensorboard==2.13.0
|
||||
|
||||
# Install Z3 optional dependency for Windows builds.
|
||||
python -m pip install z3-solver==4.12.2.0
|
||||
|
||||
run_tests() {
|
||||
# Run nvidia-smi if available
|
||||
for path in '/c/Program Files/NVIDIA Corporation/NVSMI/nvidia-smi.exe' /c/Windows/System32/nvidia-smi.exe; do
|
||||
if [[ -x "$path" ]]; then
|
||||
"$path" || echo "true";
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ $NUM_TEST_SHARDS -eq 1 ]]; then
|
||||
"$SCRIPT_HELPERS_DIR"/test_python_shard.bat
|
||||
"$SCRIPT_HELPERS_DIR"/test_custom_script_ops.bat
|
||||
"$SCRIPT_HELPERS_DIR"/test_custom_backend.bat
|
||||
"$SCRIPT_HELPERS_DIR"/test_libtorch.bat
|
||||
else
|
||||
"$SCRIPT_HELPERS_DIR"/test_python_shard.bat
|
||||
if [[ "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
|
||||
"$SCRIPT_HELPERS_DIR"/test_libtorch.bat
|
||||
if [[ "${USE_CUDA}" == "1" ]]; then
|
||||
"$SCRIPT_HELPERS_DIR"/test_python_jit_legacy.bat
|
||||
fi
|
||||
elif [[ "${SHARD_NUMBER}" == 2 && $NUM_TEST_SHARDS -gt 1 ]]; then
|
||||
"$SCRIPT_HELPERS_DIR"/test_custom_backend.bat
|
||||
"$SCRIPT_HELPERS_DIR"/test_custom_script_ops.bat
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
run_tests
|
||||
assert_git_not_dirty
|
||||
echo "TEST PASSED"
|
||||
@ -1,8 +1,3 @@
|
||||
Warning
|
||||
=======
|
||||
|
||||
Contents may be out of date. Our CircleCI workflows are gradually being migrated to GitHub Actions.
|
||||
|
||||
Structure of CI
|
||||
===============
|
||||
|
||||
@ -21,6 +16,8 @@ setup job:
|
||||
not, even if there isn't a Git checkout.
|
||||
|
||||
|
||||
|
||||
|
||||
CircleCI configuration generator
|
||||
================================
|
||||
|
||||
@ -59,6 +56,7 @@ See comment [here](https://github.com/pytorch/pytorch/pull/17323#pullrequestrevi
|
||||
|
||||
In contrast with a full recursive tree traversal of configuration dimensions,
|
||||
> in the future I think we actually want to decrease our matrix somewhat and have only a few mostly-orthogonal builds that test as many different features as possible on PRs, plus a more complete suite on every PR and maybe an almost full suite nightly/weekly (we don't have this yet). Specifying PR jobs in the future might be easier to read with an explicit list when we come to this.
|
||||
|
||||
----------------
|
||||
----------------
|
||||
|
||||
@ -73,9 +71,9 @@ A **binary configuration** is a collection of
|
||||
* release or nightly
|
||||
* releases are stable, nightlies are beta and built every night
|
||||
* python version
|
||||
* linux: 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
|
||||
* macos: 3.7, 3.8
|
||||
* windows: 3.7, 3.8
|
||||
* linux: 3.5m, 3.6m 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
|
||||
* macos: 3.6, 3.7, 3.8
|
||||
* windows: 3.6, 3.7, 3.8
|
||||
* cpu version
|
||||
* cpu, cuda 9.0, cuda 10.0
|
||||
* The supported cuda versions occasionally change
|
||||
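For orientation, these dimensions combine into configuration names such as `linux_conda_3.7_cpu` and job names like `binary_linux_conda_3.7_cpu_build`, the naming used throughout the rest of this document. Below is a minimal, purely illustrative sketch of that naming scheme; it is not the actual cimodel code, and the helper `binary_config_name` is made up for this example.

```python
# Illustrative only: the real naming logic lives in the cimodel scripts shown
# further down in this compare view.
def binary_config_name(os_name, package_format, python_version, arch):
    # e.g. ("linux", "conda", "3.7", "cpu") -> "linux_conda_3.7_cpu"
    return "_".join([os_name, package_format, python_version, arch])

assert binary_config_name("linux", "conda", "3.7", "cpu") == "linux_conda_3.7_cpu"
```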
@ -106,7 +104,7 @@ All binaries are built in CircleCI workflows except Windows. There are checked-i
|
||||
|
||||
Some quick vocab:
|
||||
|
||||
* A \**workflow** is a CircleCI concept; it is a DAG of '**jobs**'. ctrl-f 'workflows' on https://github.com/pytorch/pytorch/blob/main/.circleci/config.yml to see the workflows.
|
||||
* A \**workflow** is a CircleCI concept; it is a DAG of '**jobs**'. ctrl-f 'workflows' on https://github.com/pytorch/pytorch/blob/master/.circleci/config.yml to see the workflows.
|
||||
* **jobs** are a sequence of '**steps**'
|
||||
* **steps** are usually just a bash script or a builtin CircleCI command. *All steps run in new environments, environment variables declared in one script DO NOT persist to following steps*
|
||||
* CircleCI has a **workspace**, which is essentially a cache between steps of the *same job* in which you can store artifacts between steps.
|
||||
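To make the vocab concrete, here is a toy sketch of a workflow as a DAG of jobs, written as the kind of Python dict the cimodel generators below emit; the `requires` entries are the DAG edges. The job names mirror the `binary_linux_conda_3.7_cpu_*` examples used later in this document, but the exact fields are illustrative, not the real generated config.

```python
# Toy example only; the real job dicts are produced by the cimodel scripts
# shown later in this compare view.
workflow = {
    "binary_builds": {
        "jobs": [
            {"binary_linux_build": {"name": "binary_linux_conda_3.7_cpu_build"}},
            {"binary_linux_test": {
                "name": "binary_linux_conda_3.7_cpu_test",
                # 'requires' forms the edge of the DAG: test runs after build
                "requires": ["binary_linux_conda_3.7_cpu_build"],
            }},
        ]
    }
}
```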
@ -117,8 +115,8 @@ The nightly binaries have 3 workflows. We have one job (actually 3 jobs: build,
|
||||
|
||||
1. binary_builds
|
||||
1. every day midnight EST
|
||||
2. linux: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/linux-binary-build-defaults.yml
|
||||
3. macos: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/macos-binary-build-defaults.yml
|
||||
2. linux: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/linux-binary-build-defaults.yml
|
||||
3. macos: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/macos-binary-build-defaults.yml
|
||||
4. For each binary configuration, e.g. linux_conda_3.7_cpu there is a
|
||||
1. binary_linux_conda_3.7_cpu_build
|
||||
1. Builds the build. On linux jobs this uses the 'docker executor'.
|
||||
@ -133,12 +131,12 @@ The nightly binaries have 3 workflows. We have one job (actually 3 jobs: build,
|
||||
2. Uploads the package
|
||||
2. update_s3_htmls
|
||||
1. every day 5am EST
|
||||
2. https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/binary_update_htmls.yml
|
||||
2. https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/binary_update_htmls.yml
|
||||
3. See below for what these are for and why they're needed
|
||||
4. Three jobs that each examine the current contents of aws and the conda repo and update some html files in s3
|
||||
3. binarysmoketests
|
||||
1. every day
|
||||
2. https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
|
||||
2. https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
|
||||
3. For each binary configuration, e.g. linux_conda_3.7_cpu there is a
|
||||
1. smoke_linux_conda_3.7_cpu
|
||||
1. Downloads the package from the cloud, e.g. using the official pip or conda instructions
|
||||
@ -146,26 +144,26 @@ The nightly binaries have 3 workflows. We have one job (actually 3 jobs: build,
|
||||
|
||||
## How are the jobs structured?
|
||||
|
||||
The jobs are in https://github.com/pytorch/pytorch/tree/main/.circleci/verbatim-sources. Jobs are made of multiple steps. There are some shared steps used by all the binaries/smokes. Steps of these jobs are all delegated to scripts in https://github.com/pytorch/pytorch/tree/main/.circleci/scripts .
|
||||
The jobs are in https://github.com/pytorch/pytorch/tree/master/.circleci/verbatim-sources. Jobs are made of multiple steps. There are some shared steps used by all the binaries/smokes. Steps of these jobs are all delegated to scripts in https://github.com/pytorch/pytorch/tree/master/.circleci/scripts .
|
||||
|
||||
* Linux jobs: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/linux-binary-build-defaults.yml
|
||||
* Linux jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/linux-binary-build-defaults.yml
|
||||
* binary_linux_build.sh
|
||||
* binary_linux_test.sh
|
||||
* binary_linux_upload.sh
|
||||
* MacOS jobs: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/macos-binary-build-defaults.yml
|
||||
* MacOS jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/macos-binary-build-defaults.yml
|
||||
* binary_macos_build.sh
|
||||
* binary_macos_test.sh
|
||||
* binary_macos_upload.sh
|
||||
* Update html jobs: https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/binary_update_htmls.yml
|
||||
* Update html jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/binary_update_htmls.yml
|
||||
* These delegate from the pytorch/builder repo
|
||||
* https://github.com/pytorch/builder/blob/main/cron/update_s3_htmls.sh
|
||||
* https://github.com/pytorch/builder/blob/main/cron/upload_binary_sizes.sh
|
||||
* Smoke jobs (both linux and macos): https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
|
||||
* https://github.com/pytorch/builder/blob/master/cron/update_s3_htmls.sh
|
||||
* https://github.com/pytorch/builder/blob/master/cron/upload_binary_sizes.sh
|
||||
* Smoke jobs (both linux and macos): https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
|
||||
* These delegate from the pytorch/builder repo
|
||||
* https://github.com/pytorch/builder/blob/main/run_tests.sh
|
||||
* https://github.com/pytorch/builder/blob/main/smoke_test.sh
|
||||
* https://github.com/pytorch/builder/blob/main/check_binary.sh
|
||||
* Common shared code (shared across linux and macos): https://github.com/pytorch/pytorch/blob/main/.circleci/verbatim-sources/nightly-binary-build-defaults.yml
|
||||
* https://github.com/pytorch/builder/blob/master/run_tests.sh
|
||||
* https://github.com/pytorch/builder/blob/master/smoke_test.sh
|
||||
* https://github.com/pytorch/builder/blob/master/check_binary.sh
|
||||
* Common shared code (shared across linux and macos): https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/nightly-binary-build-defaults.yml
|
||||
* binary_checkout.sh - checks out pytorch/builder repo. Right now this also checks out pytorch/pytorch, but it shouldn't. pytorch/pytorch should just be shared through the workspace. This can handle being run before binary_populate_env.sh
|
||||
* binary_populate_env.sh - parses BUILD_ENVIRONMENT into the separate env variables that make up a binary configuration. Also sets lots of default values, the date, the version strings, the location of folders in s3, all sorts of things. This generally has to be run before other steps.
|
||||
* binary_install_miniconda.sh - Installs miniconda, cross platform. Also hacks this for the update_binary_sizes job that doesn't have the right env variables
|
||||
@ -190,6 +188,18 @@ binary_run_in_docker.sh is a way to share the docker start-up code between the b
|
||||
|
||||
We want all the nightly binary jobs to run on the exact same git commit, so we wrote our own checkout logic to ensure that the same commit was always picked. Later circleci changed that to use a single pytorch checkout and persist it through the workspace (they did this because our config file was too big, so they wanted to take a lot of the setup code into scripts, but the scripts needed the code repo to exist to be called, so they added a prereq step called 'setup' to checkout the code and persist the needed scripts to the workspace). The changes to the binary jobs were not properly tested, so they all broke from missing pytorch code no longer existing. We hotfixed the problem by adding the pytorch checkout back to binary_checkout, so now there's two checkouts of pytorch on the binary jobs. This problem still needs to be fixed, but it takes careful tracing of which code is being called where.
|
||||
|
||||
# Azure Pipelines structure of the binaries
|
||||
|
||||
TODO: fill in stuff
|
||||
|
||||
## How are the workflows structured?
|
||||
|
||||
TODO: fill in stuff
|
||||
|
||||
## How are the jobs structured?
|
||||
|
||||
TODO: fill in stuff
|
||||
|
||||
# Code structure of the binaries (circleci agnostic)
|
||||
|
||||
## Overview
|
||||
@ -205,22 +215,28 @@ pytorch/pytorch
|
||||
- config.yml # GENERATED file that actually controls all circleci behavior
|
||||
- verbatim-sources # Used to generate job/workflow sections in ^
|
||||
- scripts/ # Code needed to prepare circleci environments for binary build scripts
|
||||
|
||||
- setup.py # Builds pytorch. This is wrapped in pytorch/builder
|
||||
- cmake files # used in normal building of pytorch
|
||||
|
||||
# All code needed to prepare a binary build, given an environment
|
||||
# with all the right variables/packages/paths.
|
||||
pytorch/builder
|
||||
|
||||
# Given an installed binary and a proper python env, runs some checks
|
||||
# to make sure the binary was built the proper way. Checks things like
|
||||
# the library dependencies, symbols present, etc.
|
||||
- check_binary.sh
|
||||
|
||||
# Given an installed binary, runs python tests to make sure everything
|
||||
# is in order. These should be de-duped. Right now they both run smoke
|
||||
# tests, but are called from different places. Usually just call some
|
||||
# import statements, but also has overlap with check_binary.sh above
|
||||
- run_tests.sh
|
||||
- smoke_test.sh
|
||||
|
||||
# Folders that govern how packages are built. See paragraphs below
|
||||
|
||||
- conda/
|
||||
- build_pytorch.sh # Entrypoint. Delegates to proper conda build folder
|
||||
- switch_cuda_version.sh # Switches activate CUDA installation in Docker
|
||||
@ -308,7 +324,7 @@ Note that the Windows Python wheels are still built in conda environments. Some
|
||||
|
||||
* These should all be consolidated
|
||||
* These must run on all OS types: MacOS, Linux, and Windows
|
||||
* These all run smoke tests at the moment. They inspect the packages some, maybe run a few import statements. They DO NOT run the python tests nor the cpp tests. The idea is that python tests on main and PR merges will catch all breakages. All these tests have to do is make sure the special binary machinery didn’t mess anything up.
|
||||
* These all run smoke tests at the moment. They inspect the packages some, maybe run a few import statements. They DO NOT run the python tests nor the cpp tests. The idea is that python tests on master and PR merges will catch all breakages. All these tests have to do is make sure the special binary machinery didn’t mess anything up.
|
||||
* There are separate run_tests.sh and smoke_test.sh because one used to be called by the smoke jobs and one used to be called by the binary test jobs (see circleci structure section above). This is still true actually, but these could be united into a single script that runs these checks, given an installed pytorch package.
|
||||
|
||||
### Note on libtorch
|
||||
@ -340,7 +356,7 @@ The Dockerfiles are available in pytorch/builder, but there is no circleci job o
|
||||
|
||||
tl;dr make a PR that looks like https://github.com/pytorch/pytorch/pull/21159
|
||||
|
||||
Sometimes we want to push a change to main and then rebuild all of today's binaries after that change. As of May 30, 2019 there isn't a way to manually run a workflow in the UI. You can manually re-run a workflow, but it will use the exact same git commits as the first run and will not include any changes. So we have to make a PR and then force circleci to run the binary workflow instead of the normal tests. The above PR is an example of how to do this; essentially you copy-paste the binarybuilds workflow steps into the default workflow steps. If you need to point the builder repo to a different commit then you'd need to change https://github.com/pytorch/pytorch/blob/main/.circleci/scripts/binary_checkout.sh#L42-L45 to checkout what you want.
|
||||
Sometimes we want to push a change to master and then rebuild all of today's binaries after that change. As of May 30, 2019 there isn't a way to manually run a workflow in the UI. You can manually re-run a workflow, but it will use the exact same git commits as the first run and will not include any changes. So we have to make a PR and then force circleci to run the binary workflow instead of the normal tests. The above PR is an example of how to do this; essentially you copy-paste the binarybuilds workflow steps into the default workflow steps. If you need to point the builder repo to a different commit then you'd need to change https://github.com/pytorch/pytorch/blob/master/.circleci/scripts/binary_checkout.sh#L42-L45 to checkout what you want.
|
||||
|
||||
## How to test changes to the binaries via .circleci
|
||||
|
||||
@ -349,28 +365,36 @@ Writing PRs that test the binaries is annoying, since the default circleci jobs
|
||||
```sh
|
||||
# Make your changes
|
||||
touch .circleci/verbatim-sources/nightly-binary-build-defaults.yml
|
||||
|
||||
# Regenerate the yaml, has to be in python 3.7
|
||||
.circleci/regenerate.sh
|
||||
|
||||
# Make a commit
|
||||
git add .circleci *
|
||||
git commit -m "My real changes"
|
||||
git push origin my_branch
|
||||
|
||||
# Now hardcode the jobs that you want in the .circleci/config.yml workflows section
|
||||
# Also eliminate ensure-consistency and should_run_job checks
|
||||
# e.g. https://github.com/pytorch/pytorch/commit/2b3344bfed8772fe86e5210cc4ee915dee42b32d
|
||||
|
||||
# Make a commit you won't keep
|
||||
git add .circleci
|
||||
git commit -m "[DO NOT LAND] testing binaries for above changes"
|
||||
git push origin my_branch
|
||||
|
||||
# Now you need to make some changes to the first commit.
|
||||
git rebase -i HEAD~2 # mark the first commit as 'edit'
|
||||
|
||||
# Make the changes
|
||||
touch .circleci/verbatim-sources/nightly-binary-build-defaults.yml
|
||||
.circleci/regenerate.sh
|
||||
|
||||
# Amend the commit and continue the rebase
|
||||
git add .circleci
|
||||
git commit --amend
|
||||
git rebase --continue
|
||||
|
||||
# Update the PR, need to force since the commits are different now
|
||||
git push origin my_branch --force
|
||||
```
|
||||
@ -399,12 +423,14 @@ docker run \
|
||||
-v your/builder/repo:/builder \
|
||||
-v where/you/want/packages/to/appear:/final_pkgs \
|
||||
-it pytorch/conda-cuda /bin/bash
|
||||
|
||||
# Export whatever variables are important to you. All variables that you'd
|
||||
# possibly need are in .circleci/scripts/binary_populate_env.sh
|
||||
# You should probably always export at least these 3 variables
|
||||
export PACKAGE_TYPE=conda
|
||||
export DESIRED_PYTHON=3.7
|
||||
export DESIRED_PYTHON=3.6
|
||||
export DESIRED_CUDA=cpu
|
||||
|
||||
# Call the entrypoint
|
||||
# `|& tee foo.log` just copies all stdout and stderr output to foo.log
|
||||
# The builds generate lots of output so you probably need this when
|
||||
@ -428,6 +454,7 @@ But if you want to try, then I’d recommend
|
||||
# Create a new terminal
|
||||
# Clear your LD_LIBRARY_PATH and trim as much out of your PATH as you
|
||||
# know how to do
|
||||
|
||||
# Install a new miniconda
|
||||
# First remove any other python or conda installation from your PATH
|
||||
# Always install miniconda 3, even if building for Python <3
|
||||
@ -438,17 +465,20 @@ chmod +x "$conda_sh"
|
||||
"$conda_sh" -b -p "$MINICONDA_ROOT"
|
||||
rm -f "$conda_sh"
|
||||
export PATH="~/my_new_conda/bin:$PATH"
|
||||
|
||||
# Create a clean python env
|
||||
# All MacOS builds use conda to manage the python env and the dependencies
# they are built with, even for the pip packages
|
||||
conda create -yn binary python=2.7
|
||||
conda activate binary
|
||||
|
||||
# Export whatever variables are important to you. All variables that you'd
|
||||
# possibly need are in .circleci/scripts/binary_populate_env.sh
|
||||
# You should probably always export at least these 3 variables
|
||||
export PACKAGE_TYPE=conda
|
||||
export DESIRED_PYTHON=3.7
|
||||
export DESIRED_PYTHON=3.6
|
||||
export DESIRED_CUDA=cpu
|
||||
|
||||
# Call the entrypoint you want
|
||||
path/to/builder/wheel/build_wheel.sh
|
||||
```
|
||||
|
||||
@ -9,9 +9,8 @@ should be "pruned".
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
import cimodel.data.dimensions as dimensions
|
||||
|
||||
from cimodel.lib.conf_tree import ConfigNode
|
||||
import cimodel.data.dimensions as dimensions
|
||||
|
||||
|
||||
LINKING_DIMENSIONS = [
|
||||
@ -27,18 +26,38 @@ DEPS_INCLUSION_DIMENSIONS = [
|
||||
|
||||
|
||||
def get_processor_arch_name(gpu_version):
|
||||
return (
|
||||
"cpu"
|
||||
if not gpu_version
|
||||
else (
|
||||
"cu" + gpu_version.strip("cuda")
|
||||
if gpu_version.startswith("cuda")
|
||||
else gpu_version
|
||||
)
|
||||
return "cpu" if not gpu_version else (
|
||||
"cu" + gpu_version.strip("cuda") if gpu_version.startswith("cuda") else gpu_version
|
||||
)
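# A quick sketch of the mapping implemented above (the reformatted and the
# original return expressions behave identically). "rocm4.2" is a hypothetical
# ROCm label; any non-CUDA gpu_version passes through unchanged.
assert get_processor_arch_name(None) == "cpu"
assert get_processor_arch_name("cuda113") == "cu113"
assert get_processor_arch_name("rocm4.2") == "rocm4.2"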
|
||||
|
||||
|
||||
CONFIG_TREE_DATA = OrderedDict()
|
||||
CONFIG_TREE_DATA = OrderedDict(
|
||||
macos=([None], OrderedDict(
|
||||
wheel=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
conda=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
libtorch=[
|
||||
"3.7",
|
||||
],
|
||||
)),
|
||||
macos_arm64=([None], OrderedDict(
|
||||
wheel=[
|
||||
"3.8",
|
||||
"3.9",
|
||||
"3.10",
|
||||
],
|
||||
conda=[
|
||||
"3.8",
|
||||
"3.9",
|
||||
"3.10",
|
||||
],
|
||||
)),
|
||||
windows=(
|
||||
# Stop building Win+CU102, see https://github.com/pytorch/pytorch/issues/65648
|
||||
[v for v in dimensions.GPU_VERSIONS if v not in dimensions.ROCM_VERSION_LABELS and v != "cuda102"],
|
||||
OrderedDict(
|
||||
conda=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
# GCC config variants:
|
||||
#
|
||||
@ -48,8 +67,8 @@ CONFIG_TREE_DATA = OrderedDict()
|
||||
#
|
||||
# Libtorch with new gcc ABI is built with gcc 5.4 on Ubuntu 16.04.
|
||||
LINUX_GCC_CONFIG_VARIANTS = OrderedDict(
|
||||
manywheel=["devtoolset7"],
|
||||
conda=["devtoolset7"],
|
||||
manywheel=['devtoolset7'],
|
||||
conda=['devtoolset7'],
|
||||
libtorch=[
|
||||
"devtoolset7",
|
||||
"gcc5.4_cxx11-abi",
|
||||
@ -64,20 +83,18 @@ WINDOWS_LIBTORCH_CONFIG_VARIANTS = [
|
||||
|
||||
class TopLevelNode(ConfigNode):
|
||||
def __init__(self, node_name, config_tree_data, smoke):
|
||||
super().__init__(None, node_name)
|
||||
super(TopLevelNode, self).__init__(None, node_name)
|
||||
|
||||
self.config_tree_data = config_tree_data
|
||||
self.props["smoke"] = smoke
|
||||
|
||||
def get_children(self):
|
||||
return [
|
||||
OSConfigNode(self, x, c, p) for (x, (c, p)) in self.config_tree_data.items()
|
||||
]
|
||||
return [OSConfigNode(self, x, c, p) for (x, (c, p)) in self.config_tree_data.items()]
|
||||
|
||||
|
||||
class OSConfigNode(ConfigNode):
|
||||
def __init__(self, parent, os_name, gpu_versions, py_tree):
|
||||
super().__init__(parent, os_name)
|
||||
super(OSConfigNode, self).__init__(parent, os_name)
|
||||
|
||||
self.py_tree = py_tree
|
||||
self.props["os_name"] = os_name
|
||||
@ -89,32 +106,24 @@ class OSConfigNode(ConfigNode):
|
||||
|
||||
class PackageFormatConfigNode(ConfigNode):
|
||||
def __init__(self, parent, package_format, python_versions):
|
||||
super().__init__(parent, package_format)
|
||||
super(PackageFormatConfigNode, self).__init__(parent, package_format)
|
||||
|
||||
self.props["python_versions"] = python_versions
|
||||
self.props["package_format"] = package_format
|
||||
|
||||
|
||||
def get_children(self):
|
||||
if self.find_prop("os_name") == "linux":
|
||||
return [
|
||||
LinuxGccConfigNode(self, v)
|
||||
for v in LINUX_GCC_CONFIG_VARIANTS[self.find_prop("package_format")]
|
||||
]
|
||||
elif (
|
||||
self.find_prop("os_name") == "windows"
|
||||
and self.find_prop("package_format") == "libtorch"
|
||||
):
|
||||
return [
|
||||
WindowsLibtorchConfigNode(self, v)
|
||||
for v in WINDOWS_LIBTORCH_CONFIG_VARIANTS
|
||||
]
|
||||
return [LinuxGccConfigNode(self, v) for v in LINUX_GCC_CONFIG_VARIANTS[self.find_prop("package_format")]]
|
||||
elif self.find_prop("os_name") == "windows" and self.find_prop("package_format") == "libtorch":
|
||||
return [WindowsLibtorchConfigNode(self, v) for v in WINDOWS_LIBTORCH_CONFIG_VARIANTS]
|
||||
else:
|
||||
return [ArchConfigNode(self, v) for v in self.find_prop("gpu_versions")]
|
||||
|
||||
|
||||
class LinuxGccConfigNode(ConfigNode):
|
||||
def __init__(self, parent, gcc_config_variant):
|
||||
super().__init__(parent, "GCC_CONFIG_VARIANT=" + str(gcc_config_variant))
|
||||
super(LinuxGccConfigNode, self).__init__(parent, "GCC_CONFIG_VARIANT=" + str(gcc_config_variant))
|
||||
|
||||
self.props["gcc_config_variant"] = gcc_config_variant
|
||||
|
||||
@ -123,29 +132,23 @@ class LinuxGccConfigNode(ConfigNode):
|
||||
|
||||
# XXX devtoolset7 on CUDA 9.0 is temporarily disabled
|
||||
# see https://github.com/pytorch/pytorch/issues/20066
|
||||
if self.find_prop("gcc_config_variant") == "devtoolset7":
|
||||
if self.find_prop("gcc_config_variant") == 'devtoolset7':
|
||||
gpu_versions = filter(lambda x: x != "cuda_90", gpu_versions)
|
||||
|
||||
# XXX disabling conda rocm build since docker images are not there
|
||||
if self.find_prop("package_format") == "conda":
|
||||
gpu_versions = filter(
|
||||
lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions
|
||||
)
|
||||
if self.find_prop("package_format") == 'conda':
|
||||
gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions)
|
||||
|
||||
# XXX libtorch rocm build is temporarily disabled
|
||||
if self.find_prop("package_format") == "libtorch":
|
||||
gpu_versions = filter(
|
||||
lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions
|
||||
)
|
||||
if self.find_prop("package_format") == 'libtorch':
|
||||
gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions)
|
||||
|
||||
return [ArchConfigNode(self, v) for v in gpu_versions]
|
||||
|
||||
|
||||
class WindowsLibtorchConfigNode(ConfigNode):
|
||||
def __init__(self, parent, libtorch_config_variant):
|
||||
super().__init__(
|
||||
parent, "LIBTORCH_CONFIG_VARIANT=" + str(libtorch_config_variant)
|
||||
)
|
||||
super(WindowsLibtorchConfigNode, self).__init__(parent, "LIBTORCH_CONFIG_VARIANT=" + str(libtorch_config_variant))
|
||||
|
||||
self.props["libtorch_config_variant"] = libtorch_config_variant
|
||||
|
||||
@ -155,7 +158,7 @@ class WindowsLibtorchConfigNode(ConfigNode):
|
||||
|
||||
class ArchConfigNode(ConfigNode):
|
||||
def __init__(self, parent, gpu):
|
||||
super().__init__(parent, get_processor_arch_name(gpu))
|
||||
super(ArchConfigNode, self).__init__(parent, get_processor_arch_name(gpu))
|
||||
|
||||
self.props["gpu"] = gpu
|
||||
|
||||
@ -165,7 +168,7 @@ class ArchConfigNode(ConfigNode):
|
||||
|
||||
class PyVersionConfigNode(ConfigNode):
|
||||
def __init__(self, parent, pyver):
|
||||
super().__init__(parent, pyver)
|
||||
super(PyVersionConfigNode, self).__init__(parent, pyver)
|
||||
|
||||
self.props["pyver"] = pyver
|
||||
|
||||
@ -181,18 +184,14 @@ class PyVersionConfigNode(ConfigNode):
|
||||
|
||||
class LinkingVariantConfigNode(ConfigNode):
|
||||
def __init__(self, parent, linking_variant):
|
||||
super().__init__(parent, linking_variant)
|
||||
super(LinkingVariantConfigNode, self).__init__(parent, linking_variant)
|
||||
|
||||
def get_children(self):
|
||||
return [
|
||||
DependencyInclusionConfigNode(self, v) for v in DEPS_INCLUSION_DIMENSIONS
|
||||
]
|
||||
return [DependencyInclusionConfigNode(self, v) for v in DEPS_INCLUSION_DIMENSIONS]
|
||||
|
||||
|
||||
class DependencyInclusionConfigNode(ConfigNode):
|
||||
def __init__(self, parent, deps_variant):
|
||||
super().__init__(parent, deps_variant)
|
||||
super(DependencyInclusionConfigNode, self).__init__(parent, deps_variant)
|
||||
|
||||
self.props["libtorch_variant"] = "-".join(
|
||||
[self.parent.get_label(), self.get_label()]
|
||||
)
|
||||
self.props["libtorch_variant"] = "-".join([self.parent.get_label(), self.get_label()])
|
||||
|
||||
@ -1,24 +1,13 @@
|
||||
from collections import OrderedDict
|
||||
|
||||
import cimodel.data.binary_build_data as binary_build_data
|
||||
|
||||
import cimodel.data.simple.util.branch_filters as branch_filters
|
||||
import cimodel.data.binary_build_data as binary_build_data
|
||||
import cimodel.lib.conf_tree as conf_tree
|
||||
import cimodel.lib.miniutils as miniutils
|
||||
|
||||
class Conf(object):
|
||||
def __init__(self, os, gpu_version, pydistro, parms, smoke, libtorch_variant, gcc_config_variant, libtorch_config_variant):
|
||||
|
||||
class Conf:
|
||||
def __init__(
|
||||
self,
|
||||
os,
|
||||
gpu_version,
|
||||
pydistro,
|
||||
parms,
|
||||
smoke,
|
||||
libtorch_variant,
|
||||
gcc_config_variant,
|
||||
libtorch_config_variant,
|
||||
):
|
||||
self.os = os
|
||||
self.gpu_version = gpu_version
|
||||
self.pydistro = pydistro
|
||||
@ -29,11 +18,7 @@ class Conf:
|
||||
self.libtorch_config_variant = libtorch_config_variant
|
||||
|
||||
def gen_build_env_parms(self):
|
||||
elems = (
|
||||
[self.pydistro]
|
||||
+ self.parms
|
||||
+ [binary_build_data.get_processor_arch_name(self.gpu_version)]
|
||||
)
|
||||
elems = [self.pydistro] + self.parms + [binary_build_data.get_processor_arch_name(self.gpu_version)]
|
||||
if self.gcc_config_variant is not None:
|
||||
elems.append(str(self.gcc_config_variant))
|
||||
if self.libtorch_config_variant is not None:
|
||||
@ -41,7 +26,7 @@ class Conf:
|
||||
return elems
|
||||
|
||||
def gen_docker_image(self):
|
||||
if self.gcc_config_variant == "gcc5.4_cxx11-abi":
|
||||
if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
|
||||
if self.gpu_version is None:
|
||||
return miniutils.quote("pytorch/libtorch-cxx11-builder:cpu")
|
||||
else:
|
||||
@ -52,41 +37,30 @@ class Conf:
|
||||
if self.gpu_version is None:
|
||||
return miniutils.quote("pytorch/conda-builder:cpu")
|
||||
else:
|
||||
return miniutils.quote(f"pytorch/conda-builder:{self.gpu_version}")
|
||||
return miniutils.quote(
|
||||
f"pytorch/conda-builder:{self.gpu_version}"
|
||||
)
|
||||
|
||||
docker_word_substitution = {
|
||||
"manywheel": "manylinux",
|
||||
"libtorch": "manylinux",
|
||||
}
|
||||
|
||||
docker_distro_prefix = miniutils.override(
|
||||
self.pydistro, docker_word_substitution
|
||||
)
|
||||
docker_distro_prefix = miniutils.override(self.pydistro, docker_word_substitution)
|
||||
|
||||
# The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image
|
||||
# TODO cuda images should consolidate into tag-base images similar to rocm
|
||||
alt_docker_suffix = (
|
||||
"cuda102"
|
||||
if not self.gpu_version
|
||||
else (
|
||||
"rocm:" + self.gpu_version.strip("rocm")
|
||||
if self.gpu_version.startswith("rocm")
|
||||
else self.gpu_version
|
||||
)
|
||||
)
|
||||
docker_distro_suffix = (
|
||||
alt_docker_suffix
|
||||
if self.pydistro != "conda"
|
||||
else ("cuda" if alt_docker_suffix.startswith("cuda") else "rocm")
|
||||
)
|
||||
return miniutils.quote(
|
||||
"pytorch/" + docker_distro_prefix + "-" + docker_distro_suffix
|
||||
)
|
||||
alt_docker_suffix = "cuda102" if not self.gpu_version else (
|
||||
"rocm:" + self.gpu_version.strip("rocm") if self.gpu_version.startswith("rocm") else self.gpu_version)
|
||||
docker_distro_suffix = alt_docker_suffix if self.pydistro != "conda" else (
|
||||
"cuda" if alt_docker_suffix.startswith("cuda") else "rocm")
|
||||
return miniutils.quote("pytorch/" + docker_distro_prefix + "-" + docker_distro_suffix)
|
||||
|
||||
def get_name_prefix(self):
|
||||
return "smoke" if self.smoke else "binary"
|
||||
|
||||
def gen_build_name(self, build_or_test, nightly):
|
||||
|
||||
parts = [self.get_name_prefix(), self.os] + self.gen_build_env_parms()
|
||||
|
||||
if nightly:
|
||||
@ -104,9 +78,7 @@ class Conf:
|
||||
def gen_workflow_job(self, phase, upload_phase_dependency=None, nightly=False):
|
||||
job_def = OrderedDict()
|
||||
job_def["name"] = self.gen_build_name(phase, nightly)
|
||||
job_def["build_environment"] = miniutils.quote(
|
||||
" ".join(self.gen_build_env_parms())
|
||||
)
|
||||
job_def["build_environment"] = miniutils.quote(" ".join(self.gen_build_env_parms()))
|
||||
if self.smoke:
|
||||
job_def["requires"] = [
|
||||
"update_s3_htmls",
|
||||
@ -144,48 +116,47 @@ class Conf:
|
||||
|
||||
os_name = miniutils.override(self.os, {"macos": "mac"})
|
||||
job_name = "_".join([self.get_name_prefix(), os_name, phase])
|
||||
return {job_name: job_def}
|
||||
return {job_name : job_def}
|
||||
|
||||
def gen_upload_job(self, phase, requires_dependency):
|
||||
"""Generate binary_upload job for configuration
|
||||
|
||||
Output looks similar to:
|
||||
Output looks similar to:
|
||||
|
||||
- binary_upload:
|
||||
name: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_upload
|
||||
context: org-member
|
||||
requires: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_test
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- nightly
|
||||
tags:
|
||||
only: /v[0-9]+(\\.[0-9]+)*-rc[0-9]+/
|
||||
package_type: manywheel
|
||||
upload_subfolder: cu113
|
||||
- binary_upload:
|
||||
name: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_upload
|
||||
context: org-member
|
||||
requires: binary_linux_manywheel_3_7m_cu113_devtoolset7_nightly_test
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- nightly
|
||||
tags:
|
||||
only: /v[0-9]+(\\.[0-9]+)*-rc[0-9]+/
|
||||
package_type: manywheel
|
||||
upload_subfolder: cu113
|
||||
"""
|
||||
return {
|
||||
"binary_upload": OrderedDict(
|
||||
{
|
||||
"name": self.gen_build_name(phase, nightly=True),
|
||||
"context": "org-member",
|
||||
"requires": [
|
||||
self.gen_build_name(requires_dependency, nightly=True)
|
||||
],
|
||||
"filters": branch_filters.gen_filter_dict(
|
||||
branches_list=["nightly"],
|
||||
tags_list=[branch_filters.RC_PATTERN],
|
||||
),
|
||||
"package_type": self.pydistro,
|
||||
"upload_subfolder": binary_build_data.get_processor_arch_name(
|
||||
self.gpu_version,
|
||||
),
|
||||
}
|
||||
)
|
||||
"binary_upload": OrderedDict({
|
||||
"name": self.gen_build_name(phase, nightly=True),
|
||||
"context": "org-member",
|
||||
"requires": [self.gen_build_name(
|
||||
requires_dependency,
|
||||
nightly=True
|
||||
)],
|
||||
"filters": branch_filters.gen_filter_dict(
|
||||
branches_list=["nightly"],
|
||||
tags_list=[branch_filters.RC_PATTERN],
|
||||
),
|
||||
"package_type": self.pydistro,
|
||||
"upload_subfolder": binary_build_data.get_processor_arch_name(
|
||||
self.gpu_version,
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
def get_root(smoke, name):
|
||||
|
||||
return binary_build_data.TopLevelNode(
|
||||
name,
|
||||
binary_build_data.CONFIG_TREE_DATA,
|
||||
@ -194,6 +165,7 @@ def get_root(smoke, name):
|
||||
|
||||
|
||||
def gen_build_env_list(smoke):
|
||||
|
||||
root = get_root(smoke, "N/A")
|
||||
config_list = conf_tree.dfs(root)
|
||||
|
||||
@ -204,8 +176,7 @@ def gen_build_env_list(smoke):
|
||||
c.find_prop("gpu"),
|
||||
c.find_prop("package_format"),
|
||||
[c.find_prop("pyver")],
|
||||
c.find_prop("smoke")
|
||||
and not (c.find_prop("os_name") == "macos_arm64"), # don't test arm64
|
||||
c.find_prop("smoke") and not (c.find_prop("os_name") == "macos_arm64"), # don't test arm64
|
||||
c.find_prop("libtorch_variant"),
|
||||
c.find_prop("gcc_config_variant"),
|
||||
c.find_prop("libtorch_config_variant"),
|
||||
@ -214,11 +185,9 @@ def gen_build_env_list(smoke):
|
||||
|
||||
return newlist
|
||||
|
||||
|
||||
def predicate_exclude_macos(config):
|
||||
return config.os == "linux" or config.os == "windows"
|
||||
|
||||
|
||||
def get_nightly_uploads():
|
||||
configs = gen_build_env_list(False)
|
||||
mylist = []
|
||||
@ -228,7 +197,6 @@ def get_nightly_uploads():
|
||||
|
||||
return mylist
|
||||
|
||||
|
||||
def get_post_upload_jobs():
|
||||
return [
|
||||
{
|
||||
@ -242,8 +210,8 @@ def get_post_upload_jobs():
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
def get_nightly_tests():
|
||||
|
||||
configs = gen_build_env_list(False)
|
||||
filtered_configs = filter(predicate_exclude_macos, configs)
|
||||
|
||||
|
||||
@ -2,9 +2,9 @@ PHASES = ["build", "test"]
|
||||
|
||||
CUDA_VERSIONS = [
|
||||
"102",
|
||||
"111",
|
||||
"113",
|
||||
"116",
|
||||
"117",
|
||||
"115",
|
||||
]
|
||||
|
||||
ROCM_VERSIONS = [
|
||||
@ -16,4 +16,9 @@ ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]
|
||||
|
||||
GPU_VERSIONS = [None] + ["cuda" + v for v in CUDA_VERSIONS] + ROCM_VERSION_LABELS
|
||||
|
||||
STANDARD_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"]
|
||||
STANDARD_PYTHON_VERSIONS = [
|
||||
"3.7",
|
||||
"3.8",
|
||||
"3.9",
|
||||
"3.10"
|
||||
]
|
||||
|
||||
@ -1,7 +1,8 @@
|
||||
from cimodel.lib.conf_tree import ConfigNode
|
||||
|
||||
|
||||
CONFIG_TREE_DATA = []
|
||||
CONFIG_TREE_DATA = [
|
||||
]
|
||||
|
||||
|
||||
def get_major_pyver(dotted_version):
|
||||
@ -11,7 +12,7 @@ def get_major_pyver(dotted_version):
|
||||
|
||||
class TreeConfigNode(ConfigNode):
|
||||
def __init__(self, parent, node_name, subtree):
|
||||
super().__init__(parent, self.modify_label(node_name))
|
||||
super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name))
|
||||
self.subtree = subtree
|
||||
self.init2(node_name)
|
||||
|
||||
@ -27,7 +28,7 @@ class TreeConfigNode(ConfigNode):
|
||||
|
||||
class TopLevelNode(TreeConfigNode):
|
||||
def __init__(self, node_name, subtree):
|
||||
super().__init__(None, node_name, subtree)
|
||||
super(TopLevelNode, self).__init__(None, node_name, subtree)
|
||||
|
||||
# noinspection PyMethodMayBeStatic
|
||||
def child_constructor(self):
|
||||
@ -70,11 +71,10 @@ class ExperimentalFeatureConfigNode(TreeConfigNode):
|
||||
next_nodes = {
|
||||
"asan": AsanConfigNode,
|
||||
"xla": XlaConfigNode,
|
||||
"mps": MPSConfigNode,
|
||||
"mlc": MLCConfigNode,
|
||||
"vulkan": VulkanConfigNode,
|
||||
"parallel_tbb": ParallelTBBConfigNode,
|
||||
"crossref": CrossRefConfigNode,
|
||||
"dynamo": DynamoConfigNode,
|
||||
"noarch": NoarchConfigNode,
|
||||
"parallel_native": ParallelNativeConfigNode,
|
||||
"onnx": ONNXConfigNode,
|
||||
"libtorch": LibTorchConfigNode,
|
||||
@ -95,7 +95,6 @@ class SlowGradcheckConfigNode(TreeConfigNode):
|
||||
def child_constructor(self):
|
||||
return ExperimentalFeatureConfigNode
|
||||
|
||||
|
||||
class PureTorchConfigNode(TreeConfigNode):
|
||||
def modify_label(self, label):
|
||||
return "PURE_TORCH=" + str(label)
|
||||
@ -117,13 +116,12 @@ class XlaConfigNode(TreeConfigNode):
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
|
||||
class MPSConfigNode(TreeConfigNode):
|
||||
class MLCConfigNode(TreeConfigNode):
|
||||
def modify_label(self, label):
|
||||
return "MPS=" + str(label)
|
||||
return "MLC=" + str(label)
|
||||
|
||||
def init2(self, node_name):
|
||||
self.props["is_mps"] = node_name
|
||||
self.props["is_mlc"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
@ -173,17 +171,9 @@ class ParallelTBBConfigNode(TreeConfigNode):
|
||||
return ImportantConfigNode
|
||||
|
||||
|
||||
class CrossRefConfigNode(TreeConfigNode):
|
||||
class NoarchConfigNode(TreeConfigNode):
|
||||
def init2(self, node_name):
|
||||
self.props["is_crossref"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
|
||||
class DynamoConfigNode(TreeConfigNode):
|
||||
def init2(self, node_name):
|
||||
self.props["is_dynamo"] = node_name
|
||||
self.props["is_noarch"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
@ -255,11 +245,8 @@ class XenialCompilerConfigNode(TreeConfigNode):
|
||||
|
||||
# noinspection PyMethodMayBeStatic
|
||||
def child_constructor(self):
|
||||
return (
|
||||
XenialCompilerVersionConfigNode
|
||||
if self.props["compiler_name"]
|
||||
else PyVerConfigNode
|
||||
)
|
||||
|
||||
return XenialCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
|
||||
|
||||
|
||||
class BionicCompilerConfigNode(TreeConfigNode):
|
||||
@ -271,11 +258,8 @@ class BionicCompilerConfigNode(TreeConfigNode):
|
||||
|
||||
# noinspection PyMethodMayBeStatic
|
||||
def child_constructor(self):
|
||||
return (
|
||||
BionicCompilerVersionConfigNode
|
||||
if self.props["compiler_name"]
|
||||
else PyVerConfigNode
|
||||
)
|
||||
|
||||
return BionicCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode
|
||||
|
||||
|
||||
class XenialCompilerVersionConfigNode(TreeConfigNode):
|
||||
|
||||
@ -111,10 +111,10 @@ class Conf:
|
||||
parameters["resource_class"] = resource_class
|
||||
if phase == "build" and self.rocm_version is not None:
|
||||
parameters["resource_class"] = "xlarge"
|
||||
if hasattr(self, "filters"):
|
||||
parameters["filters"] = self.filters
|
||||
if hasattr(self, 'filters'):
|
||||
parameters['filters'] = self.filters
|
||||
if self.build_only:
|
||||
parameters["build_only"] = miniutils.quote(str(int(True)))
|
||||
parameters['build_only'] = miniutils.quote(str(int(True)))
|
||||
return parameters
|
||||
|
||||
def gen_workflow_job(self, phase):
|
||||
@ -122,6 +122,7 @@ class Conf:
|
||||
job_def["name"] = self.gen_build_name(phase)
|
||||
|
||||
if Conf.is_test_phase(phase):
|
||||
|
||||
# TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a
|
||||
# caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated
|
||||
# build of pytorch in the caffe2 build job, and just run the caffe2 tests off of a completed
|
||||
@ -142,7 +143,7 @@ class Conf:
|
||||
|
||||
|
||||
# TODO This is a hack to special case some configs just for the workflow list
|
||||
class HiddenConf:
|
||||
class HiddenConf(object):
|
||||
def __init__(self, name, parent_build=None, filters=None):
|
||||
self.name = name
|
||||
self.parent_build = parent_build
|
||||
@ -159,8 +160,7 @@ class HiddenConf:
|
||||
def gen_build_name(self, _):
|
||||
return self.name
|
||||
|
||||
|
||||
class DocPushConf:
|
||||
class DocPushConf(object):
|
||||
def __init__(self, name, parent_build=None, branch="master"):
|
||||
self.name = name
|
||||
self.parent_build = parent_build
|
||||
@ -173,13 +173,11 @@ class DocPushConf:
|
||||
"branch": self.branch,
|
||||
"requires": [self.parent_build],
|
||||
"context": "org-member",
|
||||
"filters": gen_filter_dict(
|
||||
branches_list=["nightly"], tags_list=RC_PATTERN
|
||||
),
|
||||
"filters": gen_filter_dict(branches_list=["nightly"],
|
||||
tags_list=RC_PATTERN)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def gen_docs_configs(xenial_parent_config):
|
||||
configs = []
|
||||
|
||||
@ -187,9 +185,8 @@ def gen_docs_configs(xenial_parent_config):
|
||||
HiddenConf(
|
||||
"pytorch_python_doc_build",
|
||||
parent_build=xenial_parent_config,
|
||||
filters=gen_filter_dict(
|
||||
branches_list=["master", "main", "nightly"], tags_list=RC_PATTERN
|
||||
),
|
||||
filters=gen_filter_dict(branches_list=["master", "nightly"],
|
||||
tags_list=RC_PATTERN),
|
||||
)
|
||||
)
|
||||
configs.append(
|
||||
@ -204,9 +201,8 @@ def gen_docs_configs(xenial_parent_config):
|
||||
HiddenConf(
|
||||
"pytorch_cpp_doc_build",
|
||||
parent_build=xenial_parent_config,
|
||||
filters=gen_filter_dict(
|
||||
branches_list=["master", "main", "nightly"], tags_list=RC_PATTERN
|
||||
),
|
||||
filters=gen_filter_dict(branches_list=["master", "nightly"],
|
||||
tags_list=RC_PATTERN),
|
||||
)
|
||||
)
|
||||
configs.append(
|
||||
@ -230,19 +226,20 @@ def gen_tree():
|
||||
|
||||
|
||||
def instantiate_configs(only_slow_gradcheck):
|
||||
|
||||
config_list = []
|
||||
|
||||
root = get_root()
|
||||
found_configs = conf_tree.dfs(root)
|
||||
for fc in found_configs:
|
||||
|
||||
restrict_phases = None
|
||||
distro_name = fc.find_prop("distro_name")
|
||||
compiler_name = fc.find_prop("compiler_name")
|
||||
compiler_version = fc.find_prop("compiler_version")
|
||||
is_xla = fc.find_prop("is_xla") or False
|
||||
is_asan = fc.find_prop("is_asan") or False
|
||||
is_crossref = fc.find_prop("is_crossref") or False
|
||||
is_dynamo = fc.find_prop("is_dynamo") or False
|
||||
is_noarch = fc.find_prop("is_noarch") or False
|
||||
is_onnx = fc.find_prop("is_onnx") or False
|
||||
is_pure_torch = fc.find_prop("is_pure_torch") or False
|
||||
is_vulkan = fc.find_prop("is_vulkan") or False
|
||||
@ -286,11 +283,8 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
python_version = fc.find_prop("pyver")
|
||||
parms_list[0] = fc.find_prop("abbreviated_pyver")
|
||||
|
||||
if is_crossref:
|
||||
parms_list_ignored_for_docker_image.append("crossref")
|
||||
|
||||
if is_dynamo:
|
||||
parms_list_ignored_for_docker_image.append("dynamo")
|
||||
if is_noarch:
|
||||
parms_list_ignored_for_docker_image.append("noarch")
|
||||
|
||||
if is_onnx:
|
||||
parms_list.append("onnx")
|
||||
@ -340,12 +334,13 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
build_only=build_only,
|
||||
)
|
||||
|
||||
# run docs builds on "pytorch-linux-xenial-py3.7-gcc5.4". Docs builds
|
||||
# run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
|
||||
# should run on a CPU-only build that runs on all PRs.
|
||||
# XXX should this be updated to a more modern build?
|
||||
# XXX should this be updated to a more modern build? Projects are
|
||||
# beginning to drop python3.6
|
||||
if (
|
||||
distro_name == "xenial"
|
||||
and fc.find_prop("pyver") == "3.7"
|
||||
and fc.find_prop("pyver") == "3.6"
|
||||
and cuda_version is None
|
||||
and parallel_backend is None
|
||||
and not is_vulkan
|
||||
@ -353,7 +348,8 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
and compiler_name == "gcc"
|
||||
and fc.find_prop("compiler_version") == "5.4"
|
||||
):
|
||||
c.filters = gen_filter_dict(branches_list=r"/.*/", tags_list=RC_PATTERN)
|
||||
c.filters = gen_filter_dict(branches_list=r"/.*/",
|
||||
tags_list=RC_PATTERN)
|
||||
c.dependent_tests = gen_docs_configs(c)
|
||||
|
||||
config_list.append(c)
|
||||
@ -362,13 +358,16 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
|
||||
|
||||
def get_workflow_jobs(only_slow_gradcheck=False):
|
||||
|
||||
config_list = instantiate_configs(only_slow_gradcheck)
|
||||
|
||||
x = []
|
||||
for conf_options in config_list:
|
||||
|
||||
phases = conf_options.restrict_phases or dimensions.PHASES
|
||||
|
||||
for phase in phases:
|
||||
|
||||
# TODO why does this not have a test?
|
||||
if Conf.is_test_phase(phase) and conf_options.cuda_version == "10":
|
||||
continue
|
||||
|
||||
28
.circleci/cimodel/data/simple/anaconda_prune_defintions.py
Normal file
@@ -0,0 +1,28 @@
from collections import OrderedDict

from cimodel.data.simple.util.branch_filters import gen_filter_dict
from cimodel.lib.miniutils import quote


CHANNELS_TO_PRUNE = ["pytorch-nightly", "pytorch-test"]
PACKAGES_TO_PRUNE = "pytorch torchvision torchaudio torchtext ignite torchcsprng"


def gen_workflow_job(channel: str):
    return OrderedDict(
        {
            "anaconda_prune": OrderedDict(
                {
                    "name": f"anaconda-prune-{channel}",
                    "context": quote("org-member"),
                    "packages": quote(PACKAGES_TO_PRUNE),
                    "channel": channel,
                    "filters": gen_filter_dict(branches_list=["postnightly"]),
                }
            )
        }
    )


def get_workflow_jobs():
    return [gen_workflow_job(channel) for channel in CHANNELS_TO_PRUNE]
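As a usage illustration, here is a short sketch of how the module above is consumed; it assumes the .circleci cimodel package is importable (as the imports in the file suggest) and that quote() simply wraps its argument in double quotes.

# Sketch only, not part of the diff above.
from cimodel.data.simple.anaconda_prune_defintions import gen_workflow_job, get_workflow_jobs

job = gen_workflow_job("pytorch-nightly")
# Expected shape, roughly:
# {"anaconda_prune": {"name": "anaconda-prune-pytorch-nightly",
#                     "context": '"org-member"',
#                     "packages": '"pytorch torchvision torchaudio ..."',
#                     "channel": "pytorch-nightly",
#                     "filters": {"branches": {"only": ["postnightly"]}}}}
assert len(get_workflow_jobs()) == 2  # one prune job per channel in CHANNELS_TO_PRUNE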
.circleci/cimodel/data/simple/android_definitions.py (new file, 103 lines added)
@@ -0,0 +1,103 @@
import cimodel.data.simple.util.branch_filters as branch_filters
from cimodel.data.simple.util.docker_constants import (
    DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK
)


class AndroidJob:
    def __init__(self,
                 variant,
                 template_name,
                 is_master_only=True):

        self.variant = variant
        self.template_name = template_name
        self.is_master_only = is_master_only

    def gen_tree(self):

        base_name_parts = [
            "pytorch",
            "linux",
            "xenial",
            "py3",
            "clang5",
            "android",
            "ndk",
            "r19c",
        ] + self.variant + [
            "build",
        ]

        full_job_name = "_".join(base_name_parts)
        build_env_name = "-".join(base_name_parts)

        props_dict = {
            "name": full_job_name,
            "build_environment": "\"{}\"".format(build_env_name),
            "docker_image": "\"{}\"".format(DOCKER_IMAGE_NDK),
            "requires": [DOCKER_REQUIREMENT_NDK]
        }

        if self.is_master_only:
            props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)

        return [{self.template_name: props_dict}]


class AndroidGradleJob:
    def __init__(self,
                 job_name,
                 template_name,
                 dependencies,
                 is_master_only=True,
                 is_pr_only=False,
                 extra_props=tuple()):

        self.job_name = job_name
        self.template_name = template_name
        self.dependencies = dependencies
        self.is_master_only = is_master_only
        self.is_pr_only = is_pr_only
        self.extra_props = dict(extra_props)

    def gen_tree(self):

        props_dict = {
            "name": self.job_name,
            "requires": self.dependencies,
        }

        if self.is_master_only:
            props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
        elif self.is_pr_only:
            props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.PR_BRANCH_LIST)
        if self.extra_props:
            props_dict.update(self.extra_props)

        return [{self.template_name: props_dict}]


WORKFLOW_DATA = [
    AndroidJob(["x86_32"], "pytorch_linux_build", is_master_only=False),
    AndroidJob(["x86_64"], "pytorch_linux_build"),
    AndroidJob(["arm", "v7a"], "pytorch_linux_build"),
    AndroidJob(["arm", "v8a"], "pytorch_linux_build"),
    AndroidGradleJob(
        "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build-x86_32",
        "pytorch_android_gradle_build-x86_32",
        ["pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build"],
        is_master_only=False,
        is_pr_only=True),
    AndroidGradleJob(
        "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build",
        "pytorch_android_gradle_build",
        ["pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build",
         "pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build",
         "pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build",
         "pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build"]),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
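To make the naming convention concrete, a small standalone sketch that mirrors the join logic in AndroidJob.gen_tree above, using the values of the x86_32 entry:

# Sketch only: reproduces the name assembly from AndroidJob.gen_tree.
variant = ["x86_32"]
base_name_parts = [
    "pytorch", "linux", "xenial", "py3", "clang5", "android", "ndk", "r19c",
] + variant + ["build"]

full_job_name = "_".join(base_name_parts)   # CircleCI job name
build_env_name = "-".join(base_name_parts)  # value passed as build_environment
print(full_job_name)   # pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build
print(build_env_name)  # pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_32-build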
.circleci/cimodel/data/simple/binary_smoketest.py (new file, 193 lines added)
@@ -0,0 +1,193 @@
"""
TODO: Refactor circleci/cimodel/data/binary_build_data.py to generate this file
instead of doing one offs here
Binary builds (subset, to smoke test that they'll work)

NB: If you modify this file, you need to also modify
the binary_and_smoke_tests_on_pr variable in
pytorch-ci-hud to adjust the allowed build list
at https://github.com/ezyang/pytorch-ci-hud/blob/master/src/BuildHistoryDisplay.js

Note:
This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710
- binary_linux_conda_3_6_cu90_devtoolset7_build
- binary_linux_conda_3_6_cu90_devtoolset7_test

TODO
we should test a libtorch cuda build, but they take too long
- binary_linux_libtorch_3_6m_cu90_devtoolset7_static-without-deps_build
"""

import cimodel.lib.miniutils as miniutils
import cimodel.data.simple.util.branch_filters


class SmoketestJob:
    def __init__(self,
                 template_name,
                 build_env_parts,
                 docker_image,
                 job_name,
                 is_master_only=False,
                 requires=None,
                 has_libtorch_variant=False,
                 extra_props=None):

        self.template_name = template_name
        self.build_env_parts = build_env_parts
        self.docker_image = docker_image
        self.job_name = job_name
        self.is_master_only = is_master_only
        self.requires = requires or []
        self.has_libtorch_variant = has_libtorch_variant
        self.extra_props = extra_props or {}

    def gen_tree(self):

        props_dict = {
            "build_environment": " ".join(self.build_env_parts),
            "name": self.job_name,
            "requires": self.requires,
        }

        if self.docker_image:
            props_dict["docker_image"] = self.docker_image

        if self.is_master_only:
            props_dict["filters"] = cimodel.data.simple.util.branch_filters.gen_filter_dict()

        if self.has_libtorch_variant:
            props_dict["libtorch_variant"] = "shared-with-deps"

        props_dict.update(self.extra_props)

        return [{self.template_name: props_dict}]


WORKFLOW_DATA = [
    SmoketestJob(
        "binary_linux_build",
        ["manywheel", "3.7m", "cu102", "devtoolset7"],
        "pytorch/manylinux-cuda102",
        "binary_linux_manywheel_3_7m_cu102_devtoolset7_build",
        is_master_only=True,
    ),
    SmoketestJob(
        "binary_linux_build",
        ["libtorch", "3.7m", "cpu", "devtoolset7"],
        "pytorch/manylinux-cuda102",
        "binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build",
        is_master_only=True,
        has_libtorch_variant=True,
    ),
    SmoketestJob(
        "binary_linux_build",
        ["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"],
        "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest",
        "binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build",
        is_master_only=False,
        has_libtorch_variant=True,
    ),
    SmoketestJob(
        "binary_mac_build",
        ["wheel", "3.7", "cpu"],
        None,
        "binary_macos_wheel_3_7_cpu_build",
        is_master_only=True,
    ),
    # This job has an average run time of 3 hours o.O
    # Now only running this on master to reduce overhead
    SmoketestJob(
        "binary_mac_build",
        ["libtorch", "3.7", "cpu"],
        None,
        "binary_macos_libtorch_3_7_cpu_build",
        is_master_only=True,
    ),
    SmoketestJob(
        "binary_windows_build",
        ["libtorch", "3.7", "cpu", "debug"],
        None,
        "binary_windows_libtorch_3_7_cpu_debug_build",
        is_master_only=True,
    ),
    SmoketestJob(
        "binary_windows_build",
        ["libtorch", "3.7", "cpu", "release"],
        None,
        "binary_windows_libtorch_3_7_cpu_release_build",
        is_master_only=True,
    ),
    SmoketestJob(
        "binary_windows_build",
        ["wheel", "3.7", "cu113"],
        None,
        "binary_windows_wheel_3_7_cu113_build",
        is_master_only=True,
    ),

    SmoketestJob(
        "binary_windows_test",
        ["libtorch", "3.7", "cpu", "debug"],
        None,
        "binary_windows_libtorch_3_7_cpu_debug_test",
        is_master_only=True,
        requires=["binary_windows_libtorch_3_7_cpu_debug_build"],
    ),
    SmoketestJob(
        "binary_windows_test",
        ["libtorch", "3.7", "cpu", "release"],
        None,
        "binary_windows_libtorch_3_7_cpu_release_test",
        is_master_only=False,
        requires=["binary_windows_libtorch_3_7_cpu_release_build"],
    ),
    SmoketestJob(
        "binary_windows_test",
        ["wheel", "3.7", "cu113"],
        None,
        "binary_windows_wheel_3_7_cu113_test",
        is_master_only=True,
        requires=["binary_windows_wheel_3_7_cu113_build"],
        extra_props={
            "executor": "windows-with-nvidia-gpu",
        },
    ),

    SmoketestJob(
        "binary_linux_test",
        ["manywheel", "3.7m", "cu102", "devtoolset7"],
        "pytorch/manylinux-cuda102",
        "binary_linux_manywheel_3_7m_cu102_devtoolset7_test",
        is_master_only=True,
        requires=["binary_linux_manywheel_3_7m_cu102_devtoolset7_build"],
        extra_props={
            "resource_class": "gpu.medium",
            "use_cuda_docker_runtime": miniutils.quote((str(1))),
        },
    ),
    SmoketestJob(
        "binary_linux_test",
        ["libtorch", "3.7m", "cpu", "devtoolset7"],
        "pytorch/manylinux-cuda102",
        "binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_test",
        is_master_only=True,
        requires=["binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build"],
        has_libtorch_variant=True,
    ),
    SmoketestJob(
        "binary_linux_test",
        ["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"],
        "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest",
        "binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test",
        is_master_only=True,
        requires=["binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build"],
        has_libtorch_variant=True,
    ),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
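For reference, a sketch (not part of the diff) of the tree that the first SmoketestJob above would emit, following the gen_tree logic shown:

# Sketch only: approximates SmoketestJob.gen_tree() for the manywheel cu102 build entry.
props_dict = {
    "build_environment": "manywheel 3.7m cu102 devtoolset7",
    "name": "binary_linux_manywheel_3_7m_cu102_devtoolset7_build",
    "requires": [],
    "docker_image": "pytorch/manylinux-cuda102",
    # is_master_only=True additionally attaches the default branch filter from
    # cimodel.data.simple.util.branch_filters.gen_filter_dict().
}
tree = [{"binary_linux_build": props_dict}]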
@@ -1,39 +1,39 @@
from collections import OrderedDict

from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN

from cimodel.lib.miniutils import quote
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN


# NOTE: All hardcoded docker image builds have been migrated to GHA
IMAGE_NAMES = []
IMAGE_NAMES = [
]

# This entry should be an element from the list above
# This should contain the image matching the "slow_gradcheck" entry in
# pytorch_build_data.py
SLOW_GRADCHECK_IMAGE_NAME = "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"


def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
    """Generates a list of docker image build definitions"""
    ret = []
    for image_name in images:
        if image_name.startswith("docker-"):
            image_name = image_name.lstrip("docker-")
        if image_name.startswith('docker-'):
            image_name = image_name.lstrip('docker-')
        if only_slow_gradcheck and image_name is not SLOW_GRADCHECK_IMAGE_NAME:
            continue

        parameters = OrderedDict(
            {
                "name": quote(f"docker-{image_name}"),
                "image_name": quote(image_name),
            }
        )
        if image_name == "pytorch-linux-xenial-py3.7-gcc5.4":
        parameters = OrderedDict({
            "name": quote(f"docker-{image_name}"),
            "image_name": quote(image_name),
        })
        if image_name == "pytorch-linux-xenial-py3.6-gcc5.4":
            # pushing documentation on tags requires CircleCI to also
            # build all the dependencies on tags, including this docker image
            parameters["filters"] = gen_filter_dict(
                branches_list=r"/.*/", tags_list=RC_PATTERN
            )
        ret.append(OrderedDict({"docker_build_job": parameters}))
            parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",
                                                    tags_list=RC_PATTERN)
        ret.append(OrderedDict(
            {
                "docker_build_job": parameters
            }
        ))
    return ret

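Since IMAGE_NAMES is empty here, get_workflow_jobs() returns no docker build jobs; the sketch below (an assumption, not from the diff) shows the shape of a single entry when an image name is listed, assuming miniutils.quote() simply wraps a string in double quotes.

# Sketch only, not part of the diff above.
from collections import OrderedDict

image_name = "pytorch-linux-xenial-py3.6-gcc5.4"  # illustrative entry
parameters = OrderedDict({
    "name": '"docker-{}"'.format(image_name),   # mirrors quote(f"docker-{image_name}")
    "image_name": '"{}"'.format(image_name),    # mirrors quote(image_name)
})
# The docs-build image additionally receives branch/tag filters so RC tags rebuild it.
job = OrderedDict({"docker_build_job": parameters})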
@@ -1,6 +1,5 @@
import cimodel.lib.miniutils as miniutils
from cimodel.data.simple.util.branch_filters import gen_filter_dict_exclude
from cimodel.data.simple.util.versions import MultiPartVersion
import cimodel.lib.miniutils as miniutils

XCODE_VERSION = MultiPartVersion([12, 5, 1])

@@ -11,10 +10,8 @@ class ArchVariant:
        self.custom_build_name = custom_build_name

    def render(self):
        extra_parts = (
            [self.custom_build_name] if len(self.custom_build_name) > 0 else []
        )
        return "-".join([self.name] + extra_parts).replace("_", "-")
        extra_parts = [self.custom_build_name] if len(self.custom_build_name) > 0 else []
        return "_".join([self.name] + extra_parts)


def get_platform(arch_variant_name):
@@ -22,37 +19,36 @@ def get_platform(arch_variant_name):


class IOSJob:
    def __init__(
        self, xcode_version, arch_variant, is_org_member_context=True, extra_props=None
    ):
    def __init__(self, xcode_version, arch_variant, is_org_member_context=True, extra_props=None):
        self.xcode_version = xcode_version
        self.arch_variant = arch_variant
        self.is_org_member_context = is_org_member_context
        self.extra_props = extra_props

    def gen_name_parts(self):
        version_parts = self.xcode_version.render_dots_or_parts("-")
        build_variant_suffix = self.arch_variant.render()
        return (
            [
                "ios",
            ]
            + version_parts
            + [
                build_variant_suffix,
            ]
        )
    def gen_name_parts(self, with_version_dots):

        version_parts = self.xcode_version.render_dots_or_parts(with_version_dots)
        build_variant_suffix = "_".join([self.arch_variant.render(), "build"])

        return [
            "pytorch",
            "ios",
        ] + version_parts + [
            build_variant_suffix,
        ]

    def gen_job_name(self):
        return "-".join(self.gen_name_parts())
        return "_".join(self.gen_name_parts(False))

    def gen_tree(self):

        platform_name = get_platform(self.arch_variant.name)

        props_dict = {
            "name": self.gen_job_name(),
            "build_environment": self.gen_job_name(),
            "build_environment": "-".join(self.gen_name_parts(True)),
            "ios_arch": self.arch_variant.name,
            "ios_platform": platform_name,
            "name": self.gen_job_name(),
        }

        if self.is_org_member_context:
@@ -61,38 +57,30 @@ class IOSJob:
        if self.extra_props:
            props_dict.update(self.extra_props)

        props_dict["filters"] = gen_filter_dict_exclude()

        return [{"pytorch_ios_build": props_dict}]


WORKFLOW_DATA = [
    IOSJob(
        XCODE_VERSION,
        ArchVariant("x86_64"),
        is_org_member_context=False,
        extra_props={"lite_interpreter": miniutils.quote(str(int(True)))},
    ),
    # IOSJob(XCODE_VERSION, ArchVariant("arm64"), extra_props={
    #     "lite_interpreter": miniutils.quote(str(int(True)))}),
    # IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={
    #     "use_metal": miniutils.quote(str(int(True))),
    #     "lite_interpreter": miniutils.quote(str(int(True)))}),
    # IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom-ops"), extra_props={
    #     "op_list": "mobilenetv2.yaml",
    #     "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(
        XCODE_VERSION,
        ArchVariant("x86_64", "coreml"),
        is_org_member_context=False,
        extra_props={
            "use_coreml": miniutils.quote(str(int(True))),
            "lite_interpreter": miniutils.quote(str(int(True))),
        },
    ),
    # IOSJob(XCODE_VERSION, ArchVariant("arm64", "coreml"), extra_props={
    #     "use_coreml": miniutils.quote(str(int(True))),
    #     "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(XCODE_VERSION, ArchVariant("x86_64"), is_org_member_context=False, extra_props={
        "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(XCODE_VERSION, ArchVariant("x86_64", "full_jit"), is_org_member_context=False, extra_props={
        "lite_interpreter": miniutils.quote(str(int(False)))}),
    IOSJob(XCODE_VERSION, ArchVariant("arm64"), extra_props={
        "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={
        "use_metal": miniutils.quote(str(int(True))),
        "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(XCODE_VERSION, ArchVariant("arm64", "full_jit"), extra_props={
        "lite_interpreter": miniutils.quote(str(int(False)))}),
    IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom"), extra_props={
        "op_list": "mobilenetv2.yaml",
        "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(XCODE_VERSION, ArchVariant("x86_64", "coreml"), is_org_member_context=False, extra_props={
        "use_coreml": miniutils.quote(str(int(True))),
        "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(XCODE_VERSION, ArchVariant("arm64", "coreml"), extra_props={
        "use_coreml": miniutils.quote(str(int(True))),
        "lite_interpreter": miniutils.quote(str(int(True)))}),
]

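The two ArchVariant.render implementations shown in the diff above differ only in the separator and underscore handling; a tiny sketch with illustrative values:

# Sketch only: contrasts the two render() variants from the diff above.
name, custom_build_name = "x86_64", "coreml"
extra_parts = [custom_build_name] if len(custom_build_name) > 0 else []

dash_render = "-".join([name] + extra_parts).replace("_", "-")   # "x86-64-coreml"
underscore_render = "_".join([name] + extra_parts)               # "x86_64_coreml"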
@@ -11,14 +11,10 @@ class MacOsJob:
        non_phase_parts = ["pytorch", "macos", self.os_version, "py3"]

        extra_name_list = [name for name, exist in self.extra_props.items() if exist]
        full_job_name_list = (
            non_phase_parts
            + extra_name_list
            + [
                "build" if self.is_build else None,
                "test" if self.is_test else None,
            ]
        )
        full_job_name_list = non_phase_parts + extra_name_list + [
            'build' if self.is_build else None,
            'test' if self.is_test else None,
        ]

        full_job_name = "_".join(list(filter(None, full_job_name_list)))

@@ -45,8 +41,10 @@ WORKFLOW_DATA = [
        "10_13",
        is_build=True,
        is_test=True,
        extra_props=tuple({"lite_interpreter": True}.items()),
    ),
        extra_props=tuple({
            "lite_interpreter": True
        }.items()),
    )
]

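Both formulations of full_job_name_list above assemble the same name; a short sketch using the values of the single macOS 10.13 entry in WORKFLOW_DATA:

# Sketch only: name assembly for the macOS 10.13 job defined above.
non_phase_parts = ["pytorch", "macos", "10_13", "py3"]
extra_props = dict((("lite_interpreter", True),))
extra_name_list = [name for name, exist in extra_props.items() if exist]
full_job_name_list = non_phase_parts + extra_name_list + ["build", "test"]
full_job_name = "_".join(list(filter(None, full_job_name_list)))
# pytorch_macos_10_13_py3_lite_interpreter_build_test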
@@ -2,14 +2,17 @@
PyTorch Mobile PR builds (use linux host toolchain + mobile build options)
"""

import cimodel.data.simple.util.branch_filters
import cimodel.lib.miniutils as miniutils
import cimodel.data.simple.util.branch_filters


class MobileJob:
    def __init__(
        self, docker_image, docker_requires, variant_parts, is_master_only=False
    ):
            self,
            docker_image,
            docker_requires,
            variant_parts,
            is_master_only=False):
        self.docker_image = docker_image
        self.docker_requires = docker_requires
        self.variant_parts = variant_parts
@@ -37,14 +40,13 @@ class MobileJob:
        }

        if self.is_master_only:
            props_dict[
                "filters"
            ] = cimodel.data.simple.util.branch_filters.gen_filter_dict()
            props_dict["filters"] = cimodel.data.simple.util.branch_filters.gen_filter_dict()

        return [{"pytorch_linux_build": props_dict}]


WORKFLOW_DATA = []
WORKFLOW_DATA = [
]


def get_workflow_jobs():

.circleci/cimodel/data/simple/nightly_android.py (new file, 77 lines added)
@@ -0,0 +1,77 @@
from cimodel.data.simple.util.docker_constants import (
    DOCKER_IMAGE_NDK,
    DOCKER_REQUIREMENT_NDK
)


class AndroidNightlyJob:
    def __init__(self,
                 variant,
                 template_name,
                 extra_props=None,
                 with_docker=True,
                 requires=None,
                 no_build_suffix=False):

        self.variant = variant
        self.template_name = template_name
        self.extra_props = extra_props or {}
        self.with_docker = with_docker
        self.requires = requires
        self.no_build_suffix = no_build_suffix

    def gen_tree(self):

        base_name_parts = [
            "pytorch",
            "linux",
            "xenial",
            "py3",
            "clang5",
            "android",
            "ndk",
            "r19c",
        ] + self.variant

        build_suffix = [] if self.no_build_suffix else ["build"]
        full_job_name = "_".join(["nightly"] + base_name_parts + build_suffix)
        build_env_name = "-".join(base_name_parts)

        props_dict = {
            "name": full_job_name,
            "requires": self.requires,
            "filters": {"branches": {"only": "nightly"}},
        }

        props_dict.update(self.extra_props)

        if self.with_docker:
            props_dict["docker_image"] = DOCKER_IMAGE_NDK
            props_dict["build_environment"] = build_env_name

        return [{self.template_name: props_dict}]

BASE_REQUIRES = [DOCKER_REQUIREMENT_NDK]

WORKFLOW_DATA = [
    AndroidNightlyJob(["x86_32"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["x86_64"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["arm", "v7a"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["arm", "v8a"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["android_gradle"], "pytorch_android_gradle_build",
                      with_docker=False,
                      requires=[
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build",
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build",
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build",
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build"]),
    AndroidNightlyJob(["x86_32_android_publish_snapshot"], "pytorch_android_publish_snapshot",
                      extra_props={"context": "org-member"},
                      with_docker=False,
                      requires=["nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_android_gradle_build"],
                      no_build_suffix=True),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
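The requires lists above follow directly from the naming scheme in gen_tree; a sketch showing how the nightly x86_32 job name and its build environment are derived:

# Sketch only: mirrors AndroidNightlyJob.gen_tree() naming for the x86_32 entry above.
variant = ["x86_32"]
base_name_parts = [
    "pytorch", "linux", "xenial", "py3", "clang5", "android", "ndk", "r19c",
] + variant
full_job_name = "_".join(["nightly"] + base_name_parts + ["build"])
# nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build
build_env_name = "-".join(base_name_parts)
# pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_32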
Some files were not shown because too many files have changed in this diff.