Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-25 08:11:06 +08:00)
Compare commits: issue#5873...v1.3.0 (91 commits)
| SHA1 |
|---|
| 50c90a22be |
| 80ae6d294b |
| de394b672d |
| 92c6401bb9 |
| b4f32dd292 |
| 3e451b4796 |
| 036a591556 |
| 86d9ee8dee |
| aa44ffb4c9 |
| 162b054e39 |
| 7f044f7398 |
| 0c81d6ba4b |
| d1752f2bf8 |
| 49fbeb8cc8 |
| f0d3fc70b4 |
| fb489555a9 |
| b5144f1068 |
| a5c08a6abd |
| 6cc759269f |
| 6742476ba3 |
| 067aee5f30 |
| 0a7f7e6d30 |
| e9fc91cbca |
| 23df957e94 |
| a7b161c08b |
| 6bae48c127 |
| c248943743 |
| e058a37fe4 |
| aa7112a618 |
| b728ffabc3 |
| d67898a93b |
| 9a25673478 |
| 17613ad73c |
| beaae6a2b6 |
| 328f49968c |
| 7f9096f868 |
| 7e94ee235f |
| 318fb8e8b9 |
| 43bb1b2356 |
| 87fbd27cc0 |
| 225c38b719 |
| 8074526e7f |
| 06a866de94 |
| 9b22a55499 |
| f8d3eac4c3 |
| 68b4d22da7 |
| 66b73b0950 |
| 32eb3b8d7b |
| 5a2a34cd2d |
| a8083e18e8 |
| bc3fb36ed7 |
| e0822f1089 |
| cbfd4e05e9 |
| b9a2c8ac5c |
| 6d7a73c0da |
| 15e4827617 |
| 024fa34700 |
| 95d2c7fc98 |
| 7ba2baee00 |
| ccf3a6de3d |
| 1ba6fc4ca6 |
| d4d4bf5686 |
| cee965fae9 |
| 544c16cdbf |
| 494a5563b4 |
| ba4c3a1c2c |
| c556f9052f |
| 5c80dd3c1f |
| 2d8ee11139 |
| ebc2519bec |
| 8c9e4b250d |
| 6a6f047fc6 |
| deadc27c23 |
| 6276fda119 |
| 65ee8f2c23 |
| 6126cfab2c |
| e2f6fed611 |
| 667deb92f7 |
| 0e88de5580 |
| 3c8ce2a57e |
| f2080fb3f2 |
| 84afb7b0c1 |
| b6e976ae2d |
| 8626a1cc81 |
| 831566ec90 |
| f7b3b20457 |
| 8ce38cf27d |
| a94f9c7246 |
| 2fc3bb8571 |
| f694d4d872 |
| d7b6d945eb |
@@ -1,63 +0,0 @@
-# PyTorch CI Builds Pipeline on Azure DevOps
-#
-# This pipeline:
-#   1) builds PyTorch on select configurations
-#   2) runs only TestTorch unit tests.
-
-stages:
-- stage: 'Build'
-  displayName: 'Build PyTorch'
-  jobs:
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_CPU_docker
-      pool: 'PyTorch-Linux-CPU'
-      container_endpoint: pytorchms.azurecr.io
-      build_stage: True
-      is_ci_build: True
-      os: ubuntu
-      cuda: cpu
-      customMatrixes:
-        Py_38:
-          configuration: ubuntu_1804_py_38_cpu
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
-
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_GPU_docker
-      pool: 'PyTorch-Linux-GPU'
-      container_endpoint: pytorchms.azurecr.io
-      build_stage: True
-      is_ci_build: True
-      os: ubuntu
-      cuda: gpu
-      customMatrixes:
-        Py_39_CUDA_112_cuDNN_810:
-          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
-          CUDA_VERSION: 112
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_CPU
-      pool: 'PyTorch-Win-CPU'
-      build_stage: True
-      is_ci_build: True
-      os: windows
-      cuda: cpu
-      customMatrixes:
-        Py_37:
-          configuration: windows_2019_py_37_cpu
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_GPU
-      pool: 'PyTorch-Win-GPU'
-      build_stage: True
-      is_ci_build: True
-      os: windows
-      cuda: gpu
-      customMatrixes:
-        Py_38_CUDA_102_cuDNN_765:
-          configuration: windows_2019_py_38_cuda_102_cudnn_765
-          CUDA_VERSION: 102
@@ -1,82 +0,0 @@
-# PyTorch Daily Builds Pipeline on Azure DevOps
-#
-# This pipeline:
-#   1) builds PyTorch on all available configurations
-#   2) runs all PyTorch unit tests
-
-stages:
-- stage: 'BuildTest'
-  displayName: 'Build and Test PyTorch'
-  jobs:
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_CPU_docker
-      pool: 'PyTorch-Linux-CPU'
-      container_endpoint: pytorchms.azurecr.io
-      build_stage: True
-      is_daily_build: True
-      os: ubuntu
-      cuda: cpu
-      customMatrixes:
-        Py_38:
-          configuration: ubuntu_1804_py_38_cpu
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
-        Py_37:
-          configuration: ubuntu_1804_py_37_cpu
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev
-
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_GPU_docker
-      pool: 'PyTorch-Linux-GPU'
-      container_endpoint: pytorchms.azurecr.io
-      build_stage: True
-      is_daily_build: True
-      os: ubuntu
-      cuda: gpu
-      customMatrixes:
-        Py_39_CUDA_112_cuDNN_810:
-          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
-          CUDA_VERSION: 112
-        Py_38_CUDA_102_cuDNN_810:
-          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
-          CUDA_VERSION: 102
-        Py_37_CUDA_101_cuDNN_765:
-          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
-          CUDA_VERSION: 101
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_CPU
-      pool: 'PyTorch-Win-CPU'
-      build_stage: True
-      is_daily_build: True
-      os: windows
-      cuda: cpu
-      customMatrixes:
-        Py_38:
-          configuration: windows_2019_py_38_cpu
-        Py_37:
-          configuration: windows_2019_py_37_cpu
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_GPU
-      pool: 'PyTorch-Win-GPU'
-      build_stage: True
-      is_daily_build: True
-      os: windows
-      cuda: gpu
-      customMatrixes:
-        Py_39_CUDA_112_cuDNN_810:
-          configuration: windows_2019_py_39_cuda_112_cudnn_810
-          CUDA_VERSION: 112
-        Py_38_CUDA_102_cuDNN_765:
-          configuration: windows_2019_py_38_cuda_102_cudnn_765
-          CUDA_VERSION: 102
-        Py_37_CUDA_101_cuDNN_764:
-          configuration: windows_2019_py_37_cuda_101_cudnn_764
-          CUDA_VERSION: 101
@@ -1,134 +0,0 @@
-# PyTorch build steps template with Unix images Azure DevOps Instances
-#
-# This build depends on 3 parameters set as environment variables in the pipeline:
-#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
-#   - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
-#   - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts
-
-parameters:
-  name: ''
-  pool: ''
-  container_endpoint: ''
-  os: ''
-  cuda: ''
-  is_ci_build: False
-  is_official_build: False
-  is_daily_build: False
-  build_stage: False
-  verify_stage: False
-  publish_stage: False
-  customMatrixes: ''
-
-jobs:
-- job: ${{parameters.name}}
-  timeoutInMinutes: 300
-  strategy:
-    matrix:
-      ${{ insert }}: ${{parameters.customMatrixes}}
-  pool:
-    name: ${{ parameters.pool}}
-  variables:
-    DECODE_PERCENTS: false
-  container:
-    image: $[variables['container_image']]
-    endpoint: ${{parameters.container_endpoint}}
-
-  steps:
-  # Build stage
-  - ${{ if eq(parameters.build_stage, 'True') }}:
-    # Set up environment variables for specific pipeline build
-    - template: set-environment-variables.yml
-      parameters:
-        os: ${{ parameters.os}}
-        cuda: ${{ parameters.cuda}}
-        is_official_build: ${{ parameters.is_official_build}}
-
-    # Sync and update PyTorch submodules
-    - bash: git submodule update --init --recursive
-      displayName: Update PyTorch submodules
-
-    # Build PyTorch and run unit tests - no packaging
-    - ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
-      # Build PyTorch from source in develop mode
-      - bash: python setup.py develop
-        displayName: Build PyTorch from source
-
-      - ${{ if eq(parameters.is_ci_build, 'True') }}:
-        # Run TestTorch unit tests to demonstrate successful PyTorch build
-        - bash: python test/test_torch.py TestTorch
-          displayName: Run TestTorch unit tests
-
-      - ${{ if eq(parameters.is_daily_build, 'True') }}:
-        # Run all unit tests to demonstrate successful PyTorch build
-        - bash: python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
-          displayName: Run all unit tests
-
-      # Run ComponentGovernance
-      - task: ComponentGovernanceComponentDetection@0
-        inputs:
-          scanType: 'Register'
-          verbosity: 'Verbose'
-          alertWarningLevel: 'High'
-
-    # Build PyTorch and produce artifacts for verification stage
-    - ${{ if eq(parameters.is_official_build, 'True') }}:
-      # Build PyTorch from source in install mode and exclude test binaries
-      - bash: python setup.py install
-        displayName: Build PyTorch from source without test binaries
-
-      # Package PyTorch Wheel
-      - bash: python setup.py bdist_wheel
-        displayName: Package PyTorch Wheel
-
-      # Publish PyTorch Wheel
-      - task: PublishPipelineArtifact@1
-        inputs:
-          targetPath: $(Build.SourcesDirectory)/dist/
-          artifactName: Build_$(Build.BuildNumber)_$(configuration)
-        displayName: Publish PyTorch Wheel to Pipeline Artifacts
-
-  # Verification stage
-  - ${{ if eq(parameters.verify_stage, 'True') }}:
-    # Download PyTorch Wheel
-    - task: DownloadPipelineArtifact@2
-      inputs:
-        artifact: Build_$(Build.BuildNumber)_$(configuration)
-        path: $(Build.SourcesDirectory)/verify
-      displayName: Download PyTorch Wheel
-
-    # Install PyTorch Wheel
-    - bash: python -m pip install $(Build.SourcesDirectory)/verify/torch*linux*.whl
-      displayName: Install PyTorch Wheel
-
-    # Ensure PyTorch installed correctly from produced wheel
-    - bash: |
-        cd $(Build.SourcesDirectory)/verify
-        python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
-      displayName: Check PyTorch correctly installed from wheel
-
-  # Publishing stage
-  - ${{ if eq(parameters.publish_stage, 'True') }}:
-    # Download PyTorch Wheel
-    - task: DownloadPipelineArtifact@2
-      inputs:
-        artifact: Build_$(Build.BuildNumber)_$(configuration)
-        path: $(Build.SourcesDirectory)/publish
-      displayName: Download PyTorch Wheel
-
-    # Publish wheel to Azure Artifacts
-    # The flag continueOnError=true is needed as the artifact to be published
-    # may already exist, because the artifact is differentiated based on the
-    # last commit date.
-    - bash: |
-        export TORCH_VERSION=$(head -c 5 ./version.txt)
-        export LAST_COMMIT=$(git rev-parse --short HEAD)
-        export LAST_COMMIT_DATE=$(git log -1 --pretty=%ad --date=format:%Y%m%d)
-        cd $(Build.SourcesDirectory)/publish
-        export TORCH_WHEEL=$(echo torch*linux*whl)
-        az extension add -n azure-devops
-        echo $ADOTOKEN | az devops login
-        az artifacts universal publish --organization $AZURE_DEVOPS_ARTIFACTS_ORGANIZATION --project $AZURE_DEVOPS_ARTIFACTS_PROJECT --scope project --feed "PyTorch" --name $TORCH_WHEEL --description "PyTorch Official Build Artifact" --version $TORCH_VERSION-$LAST_COMMIT_DATE-$LAST_COMMIT --path .
-      env:
-        ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
-      continueOnError: true
-      displayName: Upload PyTorch Official Build package to Azure Artifacts
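Editor's note on the `${{ insert }}` line in this template: Azure DevOps splices the caller-supplied `customMatrixes` mapping into `strategy.matrix` at template-compile time, so each key becomes one matrix leg whose entries surface as runtime variables. A minimal sketch of what the CI pipeline's GPU job expands to (illustrative only, not a checked-in file):

```yaml
jobs:
- job: ubuntu_1804_GPU_docker
  strategy:
    matrix:
      # spliced in from customMatrixes by ${{ insert }}
      Py_39_CUDA_112_cuDNN_810:
        configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
        container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
        CUDA_VERSION: 112
  container:
    # runtime expression: the image is only known once a matrix leg exists
    image: $[variables['container_image']]
```

This is also why the container image is referenced with the runtime `$[...]` syntax rather than compile-time `${{ ... }}`: the matrix variables only come into existence when the job instance is created.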
@@ -1,150 +0,0 @@
-# PyTorch build steps template with Windows images Azure DevOps Instances
-#
-# This build depends on 3 parameters set as environment variables in the pipeline:
-#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
-#   - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
-#   - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts
-
-parameters:
-  name: ''
-  pool: ''
-  os: ''
-  cuda: ''
-  is_ci_build: False
-  is_official_build: False
-  is_daily_build: False
-  build_stage: False
-  verify_stage: False
-  publish_stage: False
-  customMatrixes: ''
-
-jobs:
-- job: ${{parameters.name}}
-  timeoutInMinutes: 300
-  strategy:
-    matrix:
-      ${{ insert }}: ${{parameters.customMatrixes}}
-  pool:
-    name: ${{ parameters.pool}}
-  variables:
-    CMAKE_GENERATOR: Ninja
-    PACKAGE_PDBS: 0
-
-  steps:
-  # Prepare for PyTorch build on Windows
-  - template: prepare-build-template.yml
-    parameters:
-      configuration: $(configuration)
-      build_stage: ${{ parameters.build_stage}}
-
-  # Build Stage
-  - ${{ if eq(parameters.build_stage, 'True') }}:
-    # Set up environment variables for specific pipeline build
-    - template: set-environment-variables.yml
-      parameters:
-        os: ${{ parameters.os}}
-        cuda: ${{ parameters.cuda}}
-        is_official_build: ${{ parameters.is_official_build}}
-
-    # Sync and update PyTorch submodules
-    - script: git submodule update --init --recursive
-      displayName: Update PyTorch submodules
-
-    # Build PyTorch and run unit tests - no packaging
-    - ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
-      # Build PyTorch from source in develop mode with Ninja
-      - script: call activate $(configuration) && python setup.py develop
-        displayName: Build PyTorch from source
-
-      - ${{ if eq(parameters.is_ci_build, 'True') }}:
-        # Run TestTorch unit tests to demonstrate successful PyTorch build
-        - script: call activate $(configuration) && python test\test_torch.py TestTorch
-          displayName: Run TestTorch unit tests
-
-      - ${{ if eq(parameters.is_daily_build, 'True') }}:
-        # Run all unit tests to demonstrate successful PyTorch build
-        - script: call activate $(configuration) && python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
-          displayName: Run all unit tests
-
-      # Run ComponentGovernance
-      - task: ComponentGovernanceComponentDetection@0
-        inputs:
-          scanType: 'Register'
-          verbosity: 'Verbose'
-          alertWarningLevel: 'High'
-
-    # Build PyTorch and produce artifacts for verification stage
-    - ${{ if eq(parameters.is_official_build, 'True') }}:
-      # Build PyTorch from source in install mode with Ninja and exclude test binaries
-      - script: call activate $(configuration) && python setup.py install
-        displayName: Build PyTorch from source without test binaries
-
-      # Package PyTorch Wheel
-      - script: call activate $(configuration) && python setup.py bdist_wheel
-        displayName: Package PyTorch Wheel
-
-      # Publish PyTorch Wheel
-      - task: PublishPipelineArtifact@1
-        inputs:
-          targetPath: $(Build.SourcesDirectory)\dist\
-          artifactName: Build_$(Build.BuildNumber)_$(configuration)
-        displayName: Publish PyTorch Wheel to Pipeline Artifacts
-
-  # Verification Stage
-  - ${{ if eq(parameters.verify_stage, 'True') }}:
-    # Download PyTorch Wheel
-    - task: DownloadPipelineArtifact@2
-      inputs:
-        artifact: Build_$(Build.BuildNumber)_$(configuration)
-        path: $(Build.SourcesDirectory)\verify
-      displayName: Download PyTorch Wheel
-
-    # Install PyTorch Wheel on Windows
-    - script: |
-        call activate $(configuration)
-        cd $(Build.SourcesDirectory)\verify
-        dir torch*win*.whl /b > whl.txt
-        set /p whl= < whl.txt
-        python -m pip install %whl%
-      displayName: Install PyTorch Wheel
-
-    # Ensure PyTorch installed correctly from produced wheel
-    - script: |
-        call activate $(configuration)
-        cd $(Build.SourcesDirectory)\verify
-        python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
-      displayName: Check PyTorch correctly installed from wheel
-
-  # Publishing stage
-  - ${{ if eq(parameters.publish_stage, 'True') }}:
-    # Download PyTorch Wheel
-    - task: DownloadPipelineArtifact@2
-      inputs:
-        artifact: Build_$(Build.BuildNumber)_$(configuration)
-        path: $(Build.SourcesDirectory)\publish
-      displayName: Download PyTorch Wheel
-
-    # Set up Azure Artifacts for Windows
-    # The pip install --upgrade command is a bug fix for Azure CLI on Windows
-    # More info: https://github.com/Azure/azure-cli/issues/16858
-    - script: |
-        pip install --upgrade pip --target \opt\az\lib\python3.6\site-packages\
-        az extension add -n azure-devops
-      displayName: Set up Azure Artifacts download on Windows
-
-    # Publish wheel to Azure Artifacts
-    # The flag continueOnError=true is needed as the artifact to be published
-    # may already exist, because the artifact is differentiated based on the
-    # last commit date.
-    - script: |
-        set /p TORCH_VERSION= < version.txt
-        cd $(Build.SourcesDirectory)\publish
-        git rev-parse --short HEAD > last_commit.txt && set /p LAST_COMMIT= < last_commit.txt
-        git log -1 --pretty=%ad --date=format:%Y%m%d > last_commit_date.txt && set /p LAST_COMMIT_DATE= < last_commit_date.txt
-        dir torch*win*.whl /b > whl.txt && set /p TORCH_WHEEL= < whl.txt
-        echo %ADOTOKEN% | az devops login
-        az artifacts universal publish --organization %AZURE_DEVOPS_ARTIFACTS_ORGANIZATION% --project %AZURE_DEVOPS_ARTIFACTS_PROJECT% --scope project --feed "PyTorch" --name %TORCH_WHEEL% --description "PyTorch Official Build Artifact" --version %TORCH_VERSION:~0,5%-%LAST_COMMIT_DATE%-%LAST_COMMIT% --path .
-      env:
-        ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
-      continueOnError: true
-      displayName: Upload PyTorch nightly package to Azure Artifacts
@@ -1,17 +0,0 @@
-dependencies:
-  - python=PYTHON_VERSION
-  - numpy
-  - ninja
-  - pyyaml
-  - mkl
-  - mkl-include
-  - setuptools
-  - cmake
-  - cffi
-  - typing_extensions
-  - future
-  - six
-  - requests
-  - dataclasses
-  - pip:
-    - -r ../../requirements.txt
@@ -1,62 +0,0 @@
-# Build prepare steps for PyTorch on Azure DevOps to build from source.
-# These steps are shared between the normal build process and the Semmle security scan tasks.
-
-parameters:
-  build_stage: False
-  configuration: ''
-
-steps:
-# End Python tasks that may be lingering over from previous runs
-# Note: If python.exe isn't currently running, the exit code becomes 128,
-# which fails the run. Here the exit code is set to 0 to avoid a failed run.
-- script: |
-    taskkill /f /im python.exe
-    IF %ERRORLEVEL% EQU 128 exit 0
-  displayName: End previous Python processes
-
-# Clean up env directory in conda for fresh builds and set up conda environment YAML
-- powershell: |
-    Remove-Item 'C:\Miniconda\envs' -Recurse -ErrorAction Ignore
-    $env:PYTHON_VERSION = $env:SYSTEM_JOBNAME.Substring(3,1) + '.' + $env:SYSTEM_JOBNAME.Substring(4,1)
-    (Get-Content .azure_pipelines\job_templates\common-packages.yml) -replace 'PYTHON_VERSION', $env:PYTHON_VERSION | Out-File -encoding ASCII .azure_pipelines\job_templates\common-packages.yml
-  displayName: Clean up previous environments and set up conda environment YAML
-
-# Make conda environment and install required packages
-- script: |
-    call conda clean --all -y
-    call conda env create -n $(configuration) --file .azure_pipelines\job_templates\common-packages.yml
-    call activate $(configuration)
-    call conda install -c conda-forge libuv=1.39
-  displayName: Set up conda environment for building from source
-
-- ${{ if eq(parameters.build_stage, 'True') }}:
-  # Install MKL
-  - script: |
-      rmdir /s /q mkl
-      del mkl_2020.2.254.7z
-      curl https://s3.amazonaws.com/ossci-windows/mkl_2020.2.254.7z -k -O
-      7z x -aoa mkl_2020.2.254.7z -omkl
-    displayName: Install MKL
-
-  # Install sccache and randomtemp
-  # Related PyTorch GitHub issue: https://github.com/pytorch/pytorch/issues/25393
-  # Related fix: https://github.com/pytorch/builder/pull/448/
-  - script: |
-      mkdir .\tmp_bin
-      curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output .\tmp_bin\sccache.exe
-      curl -k https://s3.amazonaws.com/ossci-windows/sccache-cl.exe --output .\tmp_bin\sccache-cl.exe
-      copy .\tmp_bin\sccache.exe .\tmp_bin\nvcc.exe
-      curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.3/randomtemp.exe --output .\tmp_bin\randomtemp.exe
-    displayName: Install sccache and randomtemp
-    condition: not(eq(variables.CUDA_VERSION, ''))
-
-  # CUDA 11.2's CUB directory conflicts with CUDA 10.2 and 10.1 builds:
-  # without this step, CUDA 11.2's CUB would be injected into non-11.2 builds.
-  - powershell: Remove-Item "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include\cub" -Recurse -ErrorAction Ignore
-    displayName: Remove conflicting CUB from CUDA installation
-    condition: not(eq(variables.CUDA_VERSION, ''))
-
-  - powershell: Copy-Item -Path "F:\cuda_11_2\cub\" -Destination "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include" -Recurse
-    displayName: Copy CUDA CUB for CUDA 11.2 build
-    condition: eq(variables.CUDA_VERSION, '112')
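Editor's note: the `Substring` arithmetic above leans on a naming convention. In these pipelines `SYSTEM_JOBNAME` carries the matrix leg name (e.g. `Py_38`), whose characters at indices 3 and 4 are the Python major and minor version. A worked sketch of the placeholder substitution, assuming a matrix leg named `Py_38`:

```yaml
# Before: common-packages.yml (shown above) contains the literal placeholder
#     - python=PYTHON_VERSION
# The PowerShell step computes, for SYSTEM_JOBNAME == 'Py_38':
#     'Py_38'.Substring(3,1) -> '3';  'Py_38'.Substring(4,1) -> '8'   =>  '3.8'
# and rewrites the file in place, so conda ultimately sees:
dependencies:
  - python=3.8
```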
@@ -1,61 +0,0 @@
-# PyTorch build steps template with Unix images Azure DevOps Instances
-#
-# This build depends on 5 parameters set as environment variables in the pipeline:
-#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
-#   - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
-#   - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests
-
-parameters:
-  name: ''
-  pool: ''
-  container_endpoint: ''
-  customMatrixes: ''
-
-jobs:
-- job: ${{parameters.name}}
-  timeoutInMinutes: 600
-  strategy:
-    matrix:
-      ${{ insert }}: ${{parameters.customMatrixes}}
-  pool:
-    name: ${{ parameters.pool}}
-  variables:
-    DECODE_PERCENTS: false
-
-  steps:
-  # Don't checkout repo contents to save time and CPU compute. Environment variables
-  # related to the checkout branch, such as $(BUILD_SOURCEBRANCH), are still available.
-  - checkout: none
-
-  # Delete pytorch_tests repo from previous builds if it exists
-  - bash: rm -rf pytorch_tests/
-    displayName: Delete pytorch_tests repo from previous builds if it exists
-
-  # Clone PyTorch Tests repository
-  - bash: |
-      B64_PAT=$(printf "%s"":$_ADOTOKEN" | base64)
-      git -c http.extraHeader="Authorization: Basic ${B64_PAT}" clone $(AZURE_DEVOPS_PYTORCH_TESTS_REPO_URL)
-      cd pytorch_tests
-      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
-    env:
-      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
-    displayName: Clone PyTorch Tests repo
-
-  # Run PyTorch Unit Tests
-  - bash: bash $(Build.SourcesDirectory)/pytorch_tests/scripts/linux/run.sh
-    env:
-      _AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
-      _TS_CLONE_P: $(TS_CLONE_PASSWORD)
-      _TS_P: $(TS_PAT)
-      _TS_SM_P: $(TS_SM_PAT)
-      _AZUREML_CLONE_PASSWORD: $(AZUREML_CLONE_PASSWORD)
-      _SPPASSWORD: $(SPPASSWORD)
-    displayName: Run PyTorch Unit Tests
-
-  # Test results are available outside the docker container since
-  # the current directory is mounted as a volume of the container.
-  - task: PublishTestResults@2
-    condition: always()
-    inputs:
-      testResultsFiles: '**/test-*.xml'
-      testRunTitle: 'Publish test results for Python'
@@ -1,57 +0,0 @@
-# PyTorch build steps template with Windows images Azure DevOps Instances
-#
-# This build depends on 5 parameters set as environment variables in the pipeline:
-#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
-#   - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
-#   - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests
-
-parameters:
-  name: ''
-  pool: ''
-  customMatrixes: ''
-
-jobs:
-- job: ${{parameters.name}}
-  timeoutInMinutes: 600
-  strategy:
-    matrix:
-      ${{ insert }}: ${{parameters.customMatrixes}}
-  pool:
-    name: ${{ parameters.pool}}
-
-  steps:
-  # Don't checkout repo contents to save time and CPU compute. Environment variables
-  # related to the checkout branch, such as $(BUILD_SOURCEBRANCH), are still available.
-  - checkout: none
-
-  # Delete pytorch_tests repo from previous builds if it exists
-  - script: if exist "pytorch_tests/" rmdir "pytorch_tests/" /q /s
-    displayName: Delete pytorch_tests repo from previous builds if it exists
-
-  # Clone PyTorch Tests repository
-  - powershell: |
-      $env:B64Pat = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes(":$env:_ADOTOKEN"))
-      git -c http.extraHeader="Authorization: Basic $env:B64Pat" clone $env:AZURE_DEVOPS_pytorch_tests_REPO_URL
-      cd pytorch_tests
-      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
-    env:
-      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
-    displayName: Clone PyTorch Tests repo
-
-  # Run PyTorch Unit Tests
-  - script: call $(Build.SourcesDirectory)\pytorch_tests\scripts\windows\run.bat
-    env:
-      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
-      _AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
-      _TS_CLONE_P: $(TS_CLONE_PASSWORD)
-      _TS_P: $(TS_PAT)
-      _TS_SM_P: $(TS_SM_PAT)
-    displayName: Run PyTorch Unit Tests
-
-  # Test results are available outside the docker container since
-  # the current directory is mounted as a volume of the container.
-  - task: PublishTestResults@2
-    condition: always()
-    inputs:
-      testResultsFiles: '**\test-*.xml'
-      testRunTitle: 'Publish test results for Python'
@@ -1,131 +0,0 @@
-# Set environment variables for specific configurations
-
-parameters:
-  is_official_build: False
-  os: ''
-  cuda: ''
-
-steps:
-# Environment configuration steps for Ubuntu builds
-- ${{ if contains(parameters.os, 'ubuntu') }}:
-  # Set configuration-specific build flags
-  - ${{ if eq(parameters.is_official_build, True) }}:
-    - bash: |
-        echo "##vso[task.setvariable variable=INSTALL_TEST;]0"
-        echo "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
-        export PYTORCH_VERSION=$(head -c 5 ./version.txt)
-        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
-      displayName: Set configuration-specific build flags
-
-  # Set PyTorch CPU/GPU build flags.
-  - ${{ if contains(parameters.cuda, 'cpu') }}:
-    - bash: |
-        echo "##vso[task.setvariable variable=USE_CUDA;]0"
-        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
-      displayName: Set CUDA-specific build flag for CPU builds
-
-  - ${{ if contains(parameters.cuda, 'gpu') }}:
-    - bash: |
-        echo "##vso[task.setvariable variable=USE_CUDA;]1"
-        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
-      displayName: Set CUDA-specific build flag for GPU builds
-
-  # Set MKL environment variables
-  - bash: |
-      echo "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]/opt/intel/lib:$CMAKE_LIBRARY_PATH"
-      echo "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]/opt/intel/include:$CMAKE_INCLUDE_PATH"
-    displayName: Set MKL paths
-
-  # View current environment variables
-  - bash: printenv
-    displayName: Show environment variables
-
-# Environment configuration steps for Windows builds
-- ${{ if contains(parameters.os, 'windows') }}:
-  # Set Conda Lib Path
-  - powershell: Write-Host "##vso[task.setvariable variable=CONDA_LIB_PATH;]C:\Miniconda\envs\$(configuration)\Library\bin"
-    displayName: Set Conda Lib Path
-
-  # Set configuration-specific build flags
-  - ${{ if eq(parameters.is_official_build, True) }}:
-    - powershell: |
-        Write-Host "##vso[task.setvariable variable=INSTALL_TEST;]0"
-        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
-        Set-Variable -Name PYTORCH_VERSION -Value (Get-Content .\version.txt).Substring(0,5)
-        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
-      displayName: Set configuration-specific build flags
-
-  # Set PyTorch CPU/GPU build flags.
-  - ${{ if contains(parameters.cuda, 'cpu') }}:
-    - powershell: |
-        Write-Host "##vso[task.setvariable variable=USE_CUDA;]0"
-        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
-      displayName: Set CUDA-specific build flag for CPU build
-
-  - ${{ if contains(parameters.cuda, 'gpu') }}:
-    - powershell: |
-        Write-Host "##vso[task.setvariable variable=USE_CUDA;]1"
-        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
-      displayName: Set CUDA-specific build flag for GPU build
-
-  # Set CUDA 11.2, 10.2 or 10.1 specific build flags
-  - ${{ if eq(parameters.cuda, 'gpu') }}:
-    - powershell: |
-        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5;8.0;8.6"
-        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\"
-      displayName: Set CUDA 11.2 specific build flags
-      condition: eq(variables.CUDA_VERSION, '112')
-
-    - powershell: |
-        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
-        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\"
-      displayName: Set CUDA 10.2 specific build flags
-      condition: eq(variables.CUDA_VERSION, '102')
-
-    - powershell: |
-        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
-        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\"
-      displayName: Set CUDA 10.1 specific build flags
-      condition: eq(variables.CUDA_VERSION, '101')
-
-    - powershell: |
-        Write-Host "##vso[task.setvariable variable=CUDA_BIN_PATH;]$env:CUDA_PATH\bin\"
-        Write-Host "##vso[task.setvariable variable=CUDNN_ROOT;]$env:CUDA_PATH"
-        Write-Host "##vso[task.setvariable variable=CUDNN_INCLUDE_DIR;]$env:CUDA_PATH\include\"
-        Write-Host "##vso[task.setvariable variable=CUDNN_LIBRARY;]$env:CUDA_PATH\lib\x64\"
-        Write-Host "##vso[task.prependpath]$env:CUDA_PATH\bin"
-        Write-Host "##vso[task.setvariable variable=TORCH_NVCC_FLAGS;]-Xfatbin -compress-all --no-host-device-move-forward"
-        Write-Host "##vso[task.setvariable variable=THRUST_IGNORE_CUB_VERSION_CHECK;]1"
-        Write-Host "##vso[task.setvariable variable=NVTOOLSEXT_PATH;]C:\Program Files\NVIDIA Corporation\NvToolsExt\"
-      displayName: Set CUDA environment variables
-
-    - powershell: |
-        copy "$(CUDA_BIN_PATH)\cusparse*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
-        copy "$(CUDA_BIN_PATH)\cublas*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
-        copy "$(CUDA_BIN_PATH)\cudart*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
-        copy "$(CUDA_BIN_PATH)\curand*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
-        copy "$(CUDA_BIN_PATH)\cufft*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
-        copy "$(CUDA_BIN_PATH)\cusolver*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
-        copy "$(CUDA_BIN_PATH)\cudnn*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
-        copy "$(CUDA_BIN_PATH)\nvrtc*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
-        copy "C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64\nvToolsExt64_1.dll*" $(Build.SourcesDirectory)\torch\lib
-        copy "$(CONDA_LIB_PATH)\libiomp*5md.dll" $(Build.SourcesDirectory)\torch\lib
-        copy "$(CONDA_LIB_PATH)\uv.dll" $(Build.SourcesDirectory)\torch\lib
-      displayName: Copy CUDA/cuDNN/libomp/libuv dlls to torch\lib
-
-  # Set MKL, sccache and randomtemp environment variables
-  - powershell: |
-      Write-Host "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]$(Build.SourcesDirectory)\mkl\include"
-      Write-Host "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]$(Build.SourcesDirectory)\mkl\lib;$env:CMAKE_LIBRARY_PATH"
-      Write-Host "##vso[task.setvariable variable=ADDITIONAL_PATH;]$(Build.SourcesDirectory)\tmp_bin"
-      Write-Host "##vso[task.setvariable variable=SCCACHE_IDLE_TIMEOUT;]1500"
-      Write-Host "##vso[task.setvariable variable=RANDOMTEMP_EXECUTABLE;]$(Build.SourcesDirectory)\tmp_bin\nvcc.exe"
-      Write-Host "##vso[task.setvariable variable=CUDA_NVCC_EXECUTABLE;]$(Build.SourcesDirectory)\tmp_bin\randomtemp.exe"
-      Write-Host "##vso[task.setvariable variable=RANDOMTEMP_BASEDIR;]$(Build.SourcesDirectory)\tmp_bin"
-    displayName: Set MKL, sccache and randomtemp environment variables
-
-  # View current environment variables
-  - script: set
-    displayName: Show environment variables
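A note on the `##vso[task.setvariable]` logging command used throughout this template: a variable set this way only becomes visible to *subsequent* steps, which is why `PYTORCH_BUILD_VERSION` can be assembled incrementally (base version in one step, `.cpu`/`.cuXXX` suffix in a later one). A minimal self-contained sketch with hypothetical step names and version string:

```yaml
steps:
- bash: echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]1.9.0.dev"
  displayName: Seed the version          # not visible within this same step
- bash: echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
  displayName: Append the CPU suffix     # here $(PYTORCH_BUILD_VERSION) == 1.9.0.dev
- bash: echo "Final version: $(PYTORCH_BUILD_VERSION)"
  displayName: Read the result           # prints 1.9.0.dev.cpu
```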
@@ -1,14 +0,0 @@
-# Main logic to initiate wait for PR artifact to be ready
-
-steps:
-- task: InvokeRESTAPI@1
-  displayName: 'Wait for job success and wheel ready'
-  timeoutInMinutes: 60
-  inputs:
-    connectionType: 'connectedServiceName'
-    serviceConnection: circleciconn
-    method: 'POST'
-    headers: '{"Content-Type":"application/json", "BranchName":"$(_TARGET_BRANCH_TO_CHECK)", "JobName":"$(TARGET_CIRCLECI_BUILD_PR)", "PRNumber":"$(_NUMBER_BUILD_PR)", "TargetCommit":"$(_TARGET_COMMIT)", "PlanUrl":"$(System.CollectionUri)", "ProjectId":"$(System.TeamProjectId)", "HubName":"$(System.HostType)", "PlanId":"$(System.PlanId)", "JobId":"$(System.JobId)", "TimelineId":"$(System.TimelineId)", "TaskInstanceId":"$(System.TaskInstanceId)", "AuthToken":"$(System.AccessToken)"}'
-    body: ''
-    urlSuffix: 'api/JobStatus'
-    waitForCompletion: true
@@ -1,92 +0,0 @@
-# Initiate 11 agentless-server waiting jobs to check on the
-# status of PR artifact builds, for a maximum wait time of
-# 11*60 min = 660 mins. These jobs will pass immediately
-# once the targeted CircleCI build is ready.
-
-jobs:
-- job: checkjob1
-  pool: server
-  timeoutInMinutes: 60
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob2
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob1
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob3
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob2
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob4
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob3
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob5
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob4
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob6
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob5
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob7
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob6
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob8
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob7
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob9
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob8
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob10
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob9
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
-
-- job: checkjob11
-  pool: server
-  timeoutInMinutes: 60
-  dependsOn: checkjob10
-  continueOnError: true
-  steps:
-  - template: wheel-wait-job-template.yml
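Editor's note: the eleven near-identical jobs above could be generated from a single template loop. An untested sketch using Azure DevOps `each` expressions, with the dependency passed explicitly since template expressions have no arithmetic:

```yaml
# Hypothetical rewrite of wheel-wait-template.yml; not a file from the repo.
parameters:
- name: checks
  type: object
  default:
  - { name: checkjob1, after: '' }
  - { name: checkjob2, after: checkjob1 }
  - { name: checkjob3, after: checkjob2 }
  # ...continue the pattern up to checkjob11

jobs:
- ${{ each check in parameters.checks }}:
  - job: ${{ check.name }}
    pool: server
    timeoutInMinutes: 60
    continueOnError: true
    ${{ if ne(check.after, '') }}:   # first job has no dependency
      dependsOn: ${{ check.after }}
    steps:
    - template: wheel-wait-job-template.yml
```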
@@ -1,50 +0,0 @@
-# PyTorch Nightly PyTorch Tests Builds Pipeline on Azure DevOps
-#
-# This pipeline runs custom PyTorch unit tests on nightly
-# PyTorch wheels.
-
-stages:
-- stage: 'NightlyCustomTests'
-  displayName: 'Run custom unit tests on PyTorch wheels'
-  jobs:
-  - template: job_templates/pytorch-template-unix.yml
-    parameters:
-      name: ubuntu_1804_CPU_docker
-      pool: $(BUILD_POOL_LIN_1)
-      customMatrixes:
-        Nightly_Custom_Tests:
-          _DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_1)
-          _PYTHON_VERSION: $(PYTHON_VERSION_LIN_1)
-          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_1)
-          _RUN_TESTS: $(RUN_TESTS_LIN)
-
-  - template: job_templates/pytorch-template-unix.yml
-    parameters:
-      name: ubuntu_1804_GPU_docker
-      pool: $(BUILD_POOL_LIN_2)
-      customMatrixes:
-        Nightly_Custom_Tests:
-          _DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_2)
-          _PYTHON_VERSION: $(PYTHON_VERSION_LIN_2)
-          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_2)
-          _RUN_TESTS: $(RUN_TESTS_LIN)
-
-  - template: job_templates/pytorch-template-win.yml
-    parameters:
-      name: windows_2019_CPU
-      pool: $(BUILD_POOL_WIN_1)
-      customMatrixes:
-        Nightly_Custom_Tests:
-          _PYTHON_VERSION: $(PYTHON_VERSION_WIN_1)
-          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_1)
-          _RUN_TESTS: $(RUN_TESTS_WIN)
-
-  - template: job_templates/pytorch-template-win.yml
-    parameters:
-      name: windows_2019_GPU
-      pool: $(BUILD_POOL_WIN_2)
-      customMatrixes:
-        Nightly_Custom_Tests:
-          _PYTHON_VERSION: $(PYTHON_VERSION_WIN_2)
-          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_2)
-          _RUN_TESTS: $(RUN_TESTS_WIN)
@@ -1,46 +0,0 @@
-# PyTorch PR PyTorch Tests Builds Pipeline on Azure DevOps
-#
-# This pipeline:
-#   1) ensures that CircleCI builds for a given PR
-#      have finished, and that its artifacts are
-#      ready for download
-#   2) runs custom PyTorch unit tests on PyTorch
-#      wheels generated during PR builds.
-
-resources:
-  webhooks:
-  - webhook: GitHubPyTorchPRTrigger
-    connection: GitHubPyTorchPRTriggerConnection
-    filters:
-    - path: repositoryName
-      value: pytorch_tests
-
-stages:
-- stage: 'EnsureArtifactsReady'
-  displayName: 'Ensure PyTorch PR Artifacts are ready'
-  jobs:
-  - template: job_templates/wheel-wait-template.yml
-  variables:
-    _TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
-    _NUMBER_BUILD_PR: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
-    _TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}
-
-- stage: 'PRCustomTests'
-  displayName: 'Run custom unit tests on PyTorch wheels'
-  dependsOn: EnsureArtifactsReady
-  condition: succeeded()
-  jobs:
-  - template: job_templates/pytorch-template-unix.yml
-    parameters:
-      name: ubuntu_1804_GPU_docker
-      pool: $(BUILD_POOL_PR)
-      customMatrixes:
-        PR_Custom_Tests:
-          _PYTHON_VERSION: $(PYTHON_VERSION_PR)
-          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_PR)
-          _TARGET_CIRCLECI_BUILD: $(TARGET_CIRCLECI_BUILD_PR)
-          _TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
-          _NUMBER_BUILD_PR: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
-          _TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}
-          _DOCKER_IMAGE: $(DOCKER_IMAGE_PR)
-          _RUN_TESTS: $(RUN_TESTS_PR)
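The `${{parameters.GitHubPyTorchPRTrigger...}}` expressions read fields straight out of the incoming webhook's JSON body: for a webhook resource, Azure DevOps exposes the payload as a template parameter named after the webhook. A sketch of the correspondence, with a hypothetical payload:

```yaml
# If the external service POSTs this JSON to the incoming-webhook endpoint:
#   { "PR_NUMBER": "12345",
#     "TARGET_COMMIT": "abc1234",
#     "TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR": "main" }
# then the fields are readable at template-expansion time:
variables:
  _NUMBER_BUILD_PR: ${{ parameters.GitHubPyTorchPRTrigger.PR_NUMBER }}      # "12345"
  _TARGET_COMMIT: ${{ parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT }}    # "abc1234"
```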
@@ -1,224 +0,0 @@
-# PyTorch Official Builds Pipeline on Azure DevOps
-#
-# This pipeline:
-#   1) builds PyTorch on all available configurations
-#   2) verifies PyTorch artifacts by installing them in a clean environment
-#      and checking torch.__version__
-#   3) publishes official PyTorch artifacts to Azure DevOps Artifacts for consumption
-
-stages:
-- stage: 'Build'
-  displayName: 'Build PyTorch'
-  jobs:
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_CPU_docker
-      pool: 'PyTorch-Linux-CPU'
-      container_endpoint: pytorchms.azurecr.io
-      build_stage: True
-      is_official_build: True
-      os: ubuntu
-      cuda: cpu
-      customMatrixes:
-        Py_38:
-          configuration: ubuntu_1804_py_38_cpu
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
-        Py_37:
-          configuration: ubuntu_1804_py_37_cpu
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev
-
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_GPU_docker
-      pool: 'PyTorch-Linux-GPU'
-      container_endpoint: pytorchms.azurecr.io
-      build_stage: True
-      is_official_build: True
-      os: ubuntu
-      cuda: gpu
-      customMatrixes:
-        Py_39_CUDA_112_cuDNN_810:
-          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
-          CUDA_VERSION: 112
-        Py_38_CUDA_102_cuDNN_810:
-          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
-          CUDA_VERSION: 102
-        Py_37_CUDA_101_cuDNN_765:
-          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
-          CUDA_VERSION: 101
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_CPU
-      pool: 'PyTorch-Win-CPU'
-      build_stage: True
-      is_official_build: True
-      os: windows
-      cuda: cpu
-      customMatrixes:
-        Py_38:
-          configuration: windows_2019_py_38_cpu
-        Py_37:
-          configuration: windows_2019_py_37_cpu
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_GPU
-      pool: 'PyTorch-Win-GPU'
-      build_stage: True
-      is_official_build: True
-      os: windows
-      cuda: gpu
-      customMatrixes:
-        Py_39_CUDA_112_cuDNN_810:
-          configuration: windows_2019_py_39_cuda_112_cudnn_810
-          CUDA_VERSION: 112
-        Py_38_CUDA_102_cuDNN_765:
-          configuration: windows_2019_py_38_cuda_102_cudnn_765
-          CUDA_VERSION: 102
-        Py_37_CUDA_101_cuDNN_764:
-          configuration: windows_2019_py_37_cuda_101_cudnn_764
-          CUDA_VERSION: 101
-
-- stage: 'Verify'
-  displayName: 'Verify PyTorch wheels'
-  dependsOn: Build
-  condition: succeeded()
-  jobs:
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_CPU_docker
-      pool: 'PyTorch-Linux-CPU'
-      container_endpoint: pytorchms.azurecr.io
-      verify_stage: True
-      is_official_build: True
-      customMatrixes:
-        Py_38:
-          configuration: ubuntu_1804_py_38_cpu
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
-        Py_37:
-          configuration: ubuntu_1804_py_37_cpu
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev
-
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_GPU_docker
-      pool: 'PyTorch-Linux-GPU'
-      container_endpoint: pytorchms.azurecr.io
-      verify_stage: True
-      is_official_build: True
-      customMatrixes:
-        Py_39_CUDA_112_cuDNN_810:
-          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
-          CUDA_VERSION: 112
-        Py_38_CUDA_102_cuDNN_810:
-          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
-          CUDA_VERSION: 102
-        Py_37_CUDA_101_cuDNN_765:
-          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
-          CUDA_VERSION: 101
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_CPU
-      pool: 'PyTorch-Win-CPU'
-      verify_stage: True
-      is_official_build: True
-      customMatrixes:
-        Py_38:
-          configuration: windows_2019_py_38_cpu
-        Py_37:
-          configuration: windows_2019_py_37_cpu
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_GPU
-      pool: 'PyTorch-Win-GPU'
-      verify_stage: True
-      is_official_build: True
-      customMatrixes:
-        Py_39_CUDA_112_cuDNN_810:
-          configuration: windows_2019_py_39_cuda_112_cudnn_810
-          CUDA_VERSION: 112
-        Py_38_CUDA_102_cuDNN_765:
-          configuration: windows_2019_py_38_cuda_102_cudnn_765
-          CUDA_VERSION: 102
-        Py_37_CUDA_101_cuDNN_764:
-          configuration: windows_2019_py_37_cuda_101_cudnn_764
-          CUDA_VERSION: 101
-
-- stage: 'Publish'
-  displayName: 'Publish PyTorch wheels'
-  dependsOn: Verify
-  condition: succeeded()
-  jobs:
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_CPU_docker
-      pool: 'PyTorch-Linux-CPU'
-      container_endpoint: pytorchms.azurecr.io
-      publish_stage: True
-      is_official_build: True
-      customMatrixes:
-        Py_38:
-          configuration: ubuntu_1804_py_38_cpu
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
-        Py_37:
-          configuration: ubuntu_1804_py_37_cpu
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev
-
-  - template: job_templates/build-verify-publish-template-unix.yml
-    parameters:
-      name: ubuntu_1804_GPU_docker
-      pool: 'PyTorch-Linux-GPU'
-      container_endpoint: pytorchms.azurecr.io
-      publish_stage: True
-      is_official_build: True
-      customMatrixes:
-        Py_39_CUDA_112_cuDNN_810:
-          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
-          CUDA_VERSION: 112
-        Py_38_CUDA_102_cuDNN_810:
-          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
-          CUDA_VERSION: 102
-        Py_37_CUDA_101_cuDNN_765:
-          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
-          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
-          CUDA_VERSION: 101
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_CPU
-      pool: 'PyTorch-Win-CPU'
-      publish_stage: True
-      is_official_build: True
-      customMatrixes:
-        Py_38:
-          configuration: windows_2019_py_38_cpu
-        Py_37:
-          configuration: windows_2019_py_37_cpu
-
-  - template: job_templates/build-verify-publish-template-win.yml
-    parameters:
-      name: windows_2019_GPU
-      pool: 'PyTorch-Win-GPU'
-      publish_stage: True
-      is_official_build: True
-      customMatrixes:
-        Py_39_CUDA_112_cuDNN_810:
-          configuration: windows_2019_py_39_cuda_112_cudnn_810
-          CUDA_VERSION: 112
-        Py_38_CUDA_102_cuDNN_765:
-          configuration: windows_2019_py_38_cuda_102_cudnn_765
-          CUDA_VERSION: 102
-        Py_37_CUDA_101_cuDNN_764:
-          configuration: windows_2019_py_37_cuda_101_cudnn_764
-          CUDA_VERSION: 101
.bazelrc (3 deletions)
@@ -1,3 +0,0 @@
-build --copt=--std=c++14
-build --copt=-I.
-build --copt=-isystem --copt bazel-out/k8-fastbuild/bin
@@ -1 +0,0 @@
-3.1.0
@@ -31,7 +31,7 @@ Usage
 1. Make changes to these scripts.
 2. Run the `regenerate.sh` script in this directory and commit the script changes and the resulting change to `config.yml`.
 
-You'll see a build failure on GitHub if the scripts don't agree with the checked-in version.
+You'll see a build failure on TravisCI if the scripts don't agree with the checked-in version.
 
 
 Motivation
@@ -55,7 +55,7 @@ Future direction
 See comment [here](https://github.com/pytorch/pytorch/pull/17323#pullrequestreview-206945747):
 
 In contrast with a full recursive tree traversal of configuration dimensions,
-> in the future I think we actually want to decrease our matrix somewhat and have only a few mostly-orthogonal builds that taste as many different features as possible on PRs, plus a more complete suite on every PR and maybe an almost full suite nightly/weekly (we don't have this yet). Specifying PR jobs in the future might be easier to read with an explicit list when we come to this.
+> in the future future I think we actually want to decrease our matrix somewhat and have only a few mostly-orthogonal builds that taste as many different features as possible on PRs, plus a more complete suite on every PR and maybe an almost full suite nightly/weekly (we don't have this yet). Specifying PR jobs in the future might be easier to read with an explicit list when we come to this.
 
 ----------------
 ----------------
@@ -71,9 +71,9 @@ A **binary configuration** is a collection of
 * release or nightly
     * releases are stable, nightlies are beta and built every night
 * python version
-    * linux: 3.5m, 3.6m 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
-    * macos: 3.6, 3.7, 3.8
-    * windows: 3.6, 3.7, 3.8
+    * linux: 2.7m, 2.7mu, 3.5m, 3.6m 3.7m (mu is wide unicode or something like that. It usually doesn't matter but you should know that it exists)
+    * macos: 2.7, 3.5, 3.6, 3.7
+    * windows: 3.5, 3.6, 3.7
 * cpu version
     * cpu, cuda 9.0, cuda 10.0
     * The supported cuda versions occasionally change
@@ -90,7 +90,7 @@ The binaries are built in CircleCI. There are nightly binaries built every night
 
 We have 3 types of binary packages
 
-* pip packages - nightlies are stored on s3 (pip install -f \<a s3 url\>). releases are stored in a pip repo (pip install torch) (ask Soumith about this)
+* pip packages - nightlies are stored on s3 (pip install -f <a s3 url>). releases are stored in a pip repo (pip install torch) (ask Soumith about this)
 * conda packages - nightlies and releases are both stored in a conda repo. Nightly packages have a '_nightly' suffix
 * libtorch packages - these are zips of all the c++ libraries, header files, and sometimes dependencies. These are c++ only
     * shared with dependencies (the only supported option for Windows)
@@ -104,16 +104,16 @@ All binaries are built in CircleCI workflows except Windows. There are checked-i
 
 Some quick vocab:
 
-* A **workflow** is a CircleCI concept; it is a DAG of '**jobs**'. ctrl-f 'workflows' on https://github.com/pytorch/pytorch/blob/master/.circleci/config.yml to see the workflows.
+* A**workflow** is a CircleCI concept; it is a DAG of '**jobs**'. ctrl-f 'workflows' on https://github.com/pytorch/pytorch/blob/master/.circleci/config.yml to see the workflows.
 * **jobs** are a sequence of '**steps**'
-* **steps** are usually just a bash script or a builtin CircleCI command. *All steps run in new environments, environment variables declared in one script DO NOT persist to following steps*
+* **steps** are usually just a bash script or a builtin CircleCI command.* All steps run in new environments, environment variables declared in one script DO NOT persist to following steps*
 * CircleCI has a **workspace**, which is essentially a cache between steps of the *same job* in which you can store artifacts between steps.
 
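A minimal CircleCI config sketch illustrating the workflow/job/step hierarchy (hypothetical job names and scripts, not the real pytorch config):

```yaml
version: 2
jobs:                      # each job is a sequence of steps
  build:
    docker:
      - image: cimg/base:stable
    steps:
      - checkout
      - run: export FOO=bar            # a step runs in a fresh shell, so FOO...
      - run: echo "FOO=${FOO:-unset}"  # ...does NOT persist; this prints FOO=unset
workflows:                 # a workflow is a DAG of jobs
  version: 2
  main:
    jobs:
      - build
```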
 ## How are the workflows structured?
 
 The nightly binaries have 3 workflows. We have one job (actually 3 jobs: build, test, and upload) per binary configuration
 
-1. binary_builds
+1. binarybuilds
     1. every day midnight EST
     2. linux: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/linux-binary-build-defaults.yml
     3. macos: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/macos-binary-build-defaults.yml
@@ -144,7 +144,7 @@ The nightly binaries have 3 workflows. We have one job (actually 3 jobs: build,
 
 ## How are the jobs structured?
 
-The jobs are in https://github.com/pytorch/pytorch/tree/master/.circleci/verbatim-sources. Jobs are made of multiple steps. There are some shared steps used by all the binaries/smokes. Steps of these jobs are all delegated to scripts in https://github.com/pytorch/pytorch/tree/master/.circleci/scripts .
+The jobs are in https://github.com/pytorch/pytorch/tree/master/.circleci/verbatim-sources . Jobs are made of multiple steps. There are some shared steps used by all the binaries/smokes. Steps of these jobs are all delegated to scripts in https://github.com/pytorch/pytorch/tree/master/.circleci/scripts .
 
 * Linux jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/linux-binary-build-defaults.yml
     * binary_linux_build.sh
@@ -178,7 +178,8 @@ CircleCI creates a final yaml file by inlining every <<* segment, so if we were

So, CircleCI has several executor types: macos, machine, and docker are the ones we use. The 'machine' executor gives you two cores on some linux vm. The 'docker' executor gives you considerably more cores (nproc was 32 instead of 2 back when I tried in February). Since the dockers are faster, we try to run everything that we can in dockers. Thus

* linux build jobs use the docker executor. Running them on the docker executor was at least 2x faster than running them on the machine executor
* linux test jobs use the machine executor and spin up their own docker, since docker executors cannot run with attached GPUs. Why this nonsense? It's because we run nvidia-docker for our GPU tests; any code that calls into the CUDA runtime needs to be run on nvidia-docker. To run a nvidia-docker you need to install some nvidia packages on the host machine and then call docker with the '--runtime nvidia' argument. CircleCI doesn't support this, so we have to do it ourselves (see the sketch after this list).
    * This is not just a mere inconvenience. **This blocks all of our linux tests from using more than 2 cores.** There is nothing we can do about it but wait for a fix on CircleCI's side. Right now we only run some smoke tests (some simple imports) on the binaries, but this also affects non-binary test jobs.
* linux upload jobs use the machine executor. The upload jobs are so short that it doesn't really matter what they use
* linux smoke test jobs use the machine executor for the same reason as the linux test jobs
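For reference, a sketch of the two equivalent invocations described above (the image name is just the conda-cuda image used elsewhere in this doc):

```sh
# Using the nvidia-docker wrapper...
nvidia-docker run -it pytorch/conda-cuda /bin/bash

# ...or passing the nvidia runtime to plain docker explicitly
docker run --runtime=nvidia -it pytorch/conda-cuda /bin/bash
```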
@@ -204,7 +205,7 @@ TODO: fill in stuff

## Overview

The code that runs the binaries lives in two places: in the normal [github.com/pytorch/pytorch](http://github.com/pytorch/pytorch), but also in [github.com/pytorch/builder](http://github.com/pytorch/builder), which is a repo that defines how all the binaries are built. The relevant code is

```
@@ -260,7 +261,7 @@ Linux, MacOS and Windows use the same code flow for the conda builds.

Conda packages are built with conda-build, see https://conda.io/projects/conda-build/en/latest/resources/commands/conda-build.html

Basically, you pass `conda build` a build folder (pytorch-nightly/ above) that contains a build script and a meta.yaml. The meta.yaml specifies what python environment to build the package in and what dependencies the resulting package should have, and the build script gets called in that env to build the thing.

tl;dr on conda-build is

1. Creates a brand new conda environment, based off of deps in the meta.yaml
    1. Note that environment variables do not get passed into this build env unless they are specified in the meta.yaml
@@ -270,7 +271,7 @@ tl;dr on conda-build is

4. Runs some simple import tests (if specified in the meta.yaml)
5. Saves the finished package as a tarball
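As a concrete sketch of that flow (the exact `pytorch-nightly/` location inside the builder repo is assumed from the description above):

```sh
# conda-build reads pytorch-nightly/meta.yaml, creates a fresh env,
# runs the build script inside that env, and saves the result as a tarball
conda build builder/conda/pytorch-nightly/
```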
The build.sh we use is essentially a wrapper around `python setup.py build`, but it also manually copies in some of our dependent libraries into the resulting tarball and messes with some rpaths.

The entrypoint file `builder/conda/build_conda.sh` is complicated because
@@ -339,12 +340,12 @@ Libtorch packages are built in the wheel build scripts: manywheel/build_*.sh for

All linux builds occur in docker images. The docker images are

* pytorch/conda-cuda (previously published as soumith/conda-cuda)
    * Has ALL CUDA versions installed. The script pytorch/builder/conda/switch_cuda_version.sh sets /usr/local/cuda to a symlink to e.g. /usr/local/cuda-10.0 to enable different CUDA builds
    * Also used for cpu builds
* pytorch/manylinux-cuda90
* pytorch/manylinux-cuda92
* pytorch/manylinux-cuda100 (these three were previously published under soumith/)
    * Also used for cpu builds

The Dockerfiles are available in pytorch/builder, but there is no circleci job or script to build these docker images, and they cannot be run locally (unless you have the correct local packages/paths). Only Soumith can build them right now.
@@ -355,15 +356,15 @@ The Dockerfiles are available in pytorch/builder, but there is no circleci job o

# How to manually rebuild the binaries

tl;dr make a PR that looks like https://github.com/pytorch/pytorch/pull/21159

Sometimes we want to push a change to master and then rebuild all of today's binaries after that change. As of May 30, 2019 there isn't a way to manually run a workflow in the UI. You can manually re-run a workflow, but it will use the exact same git commits as the first run and will not include any changes. So we have to make a PR and then force circleci to run the binary workflow instead of the normal tests. The above PR is an example of how to do this; essentially you copy-paste the binarybuilds workflow steps into the default workflow steps. If you need to point the builder repo to a different commit then you'd need to change https://github.com/pytorch/pytorch/blob/master/.circleci/scripts/binary_checkout.sh#L42-L45 to checkout what you want.
## How to test changes to the binaries via .circleci

Writing PRs that test the binaries is annoying, since the default circleci jobs that run on PRs are not the jobs that you want to run. Likely, changes to the binaries will touch something under .circleci/ and require that .circleci/config.yml be regenerated (.circleci/config.yml controls all .circleci behavior, and is generated using `.circleci/regenerate.sh` in python 3.7). But you also need to manually hardcode the binary jobs that you want to test into the .circleci/config.yml workflow, so you should actually make at least two commits, one for your changes and one to temporarily hardcode jobs. See https://github.com/pytorch/pytorch/pull/22928 as an example of how to do this.
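A minimal sketch of the regeneration step (assuming python 3.7 is the interpreter the script finds on PATH, as described above):

```sh
# Regenerate .circleci/config.yml from the sources under .circleci/
cd .circleci
./regenerate.sh
```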
```sh
# Make your changes
touch .circleci/verbatim-sources/nightly-binary-build-defaults.yml
```
@@ -408,9 +409,9 @@ The advantage of this flow is that you can make new changes to the base commit a

You can build Linux binaries locally easily using docker.

```sh
# Run the docker
# Use the correct docker image, pytorch/conda-cuda used here as an example
#
# -v path/to/foo:path/to/bar makes path/to/foo on your local machine (the
# machine that you're running the command on) accessible to the docker
@@ -418,12 +419,14 @@ You can build Linux binaries locally easily using docker.
# in the docker container then you will see path/to/foo/baz on your local
# machine. You could also clone the pytorch and builder repos in the docker.
#
# If you're building a CUDA binary then use `nvidia-docker run` instead, see below.
#
# If you know how, add ccache as a volume too and speed up everything
docker run \
    -v your/pytorch/repo:/pytorch \
    -v your/builder/repo:/builder \
    -v where/you/want/packages/to/appear:/final_pkgs \
    -it pytorch/conda-cuda /bin/bash

# Export whatever variables are important to you. All variables that you'd
# possibly need are in .circleci/scripts/binary_populate_env.sh
```
@@ -441,7 +444,9 @@ export DESIRED_CUDA=cpu

**Building CUDA binaries on docker**

You can build CUDA binaries on CPU-only machines, but you can only run CUDA binaries on CUDA machines. This means that you can build a CUDA binary on a docker on your laptop if you so choose (though it's gonna take a long time).

To build a CUDA binary you need to use `nvidia-docker run` instead of just `docker run` (or you can manually pass `--runtime=nvidia`). This adds some needed libraries and things to build CUDA stuff.

For Facebook employees, ask about beefy machines that have docker support and use those instead of your laptop; it will be 5x as fast.
@@ -451,7 +456,7 @@ There's no easy way to generate reproducible hermetic MacOS environments. If y

But if you want to try, then I'd recommend

```sh
# Create a new terminal
# Clear your LD_LIBRARY_PATH and trim as much out of your PATH as you
# know how to do
@@ -461,7 +466,7 @@ But if you want to try, then I'd recommend
# Always install miniconda 3, even if building for Python <3
new_conda="$HOME/my_new_conda"  # note: a literal ~ would not expand inside quotes
conda_sh="$new_conda/install_miniconda.sh"
curl -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x "$conda_sh"
"$conda_sh" -b -p "$MINICONDA_ROOT"
rm -f "$conda_sh"
```
@@ -1,3 +1,5 @@
#!/usr/bin/env python3

"""
This module models the tree of configuration variants
for "smoketest" builds.
@@ -5,6 +7,9 @@ for "smoketest" builds.
Each subclass of ConfigNode represents a layer of the configuration hierarchy.
These tree nodes encapsulate the logic for whether a branch of the hierarchy
should be "pruned".

In addition to generating config.yml content, the tree is also traversed
to produce a visualization of config dimensions.
"""

from collections import OrderedDict
@@ -25,53 +30,33 @@ DEPS_INCLUSION_DIMENSIONS = [
]


def get_processor_arch_name(gpu_version):
    return "cpu" if not gpu_version else (
        "cu" + gpu_version.strip("cuda") if gpu_version.startswith("cuda") else gpu_version
    )
def get_processor_arch_name(cuda_version):
    return "cpu" if not cuda_version else "cu" + cuda_version


LINUX_PACKAGE_VARIANTS = OrderedDict(
    manywheel=[
        "2.7m",
        "2.7mu",
        "3.5m",
        "3.6m",
        "3.7m",
        "3.8m",
        "3.9m"
    ],
    conda=dimensions.STANDARD_PYTHON_VERSIONS,
    libtorch=[
        "3.7m",
        "2.7m",
    ],
)

CONFIG_TREE_DATA = OrderedDict(
    linux=(dimensions.GPU_VERSIONS, LINUX_PACKAGE_VARIANTS),
    linux=(dimensions.CUDA_VERSIONS, LINUX_PACKAGE_VARIANTS),
    macos=([None], OrderedDict(
        wheel=dimensions.STANDARD_PYTHON_VERSIONS,
        conda=dimensions.STANDARD_PYTHON_VERSIONS,
        libtorch=[
            "3.7",
            "2.7",
        ],
    )),
    macos_arm64=([None], OrderedDict(
        wheel=[
            "3.8",
            "3.9",
        ],
        conda=[
            "3.8",
            "3.9",
        ],
    )),
    windows=(
        [v for v in dimensions.GPU_VERSIONS if v not in dimensions.ROCM_VERSION_LABELS],
        OrderedDict(
            wheel=dimensions.STANDARD_PYTHON_VERSIONS,
            conda=dimensions.STANDARD_PYTHON_VERSIONS,
            libtorch=[
                "3.7",
            ],
        )
    ),
)

# GCC config variants:
@@ -90,11 +75,6 @@ LINUX_GCC_CONFIG_VARIANTS = OrderedDict(
    ],
)

WINDOWS_LIBTORCH_CONFIG_VARIANTS = [
    "debug",
    "release",
]


class TopLevelNode(ConfigNode):
    def __init__(self, node_name, config_tree_data, smoke):
@@ -108,12 +88,12 @@ class TopLevelNode(ConfigNode):


class OSConfigNode(ConfigNode):
    def __init__(self, parent, os_name, gpu_versions, py_tree):
    def __init__(self, parent, os_name, cuda_versions, py_tree):
        super(OSConfigNode, self).__init__(parent, os_name)

        self.py_tree = py_tree
        self.props["os_name"] = os_name
        self.props["gpu_versions"] = gpu_versions
        self.props["cuda_versions"] = cuda_versions

    def get_children(self):
        return [PackageFormatConfigNode(self, k, v) for k, v in self.py_tree.items()]
@@ -126,17 +106,11 @@ class PackageFormatConfigNode(ConfigNode):
        self.props["python_versions"] = python_versions
        self.props["package_format"] = package_format

        # XXX Disabling conda for 11.3 as there's currently no appropriate cudatoolkit available
        if package_format == "conda":
            self.props["gpu_versions"] = filter(lambda x: x != "cuda113", self.find_prop("gpu_versions"))

    def get_children(self):
        if self.find_prop("os_name") == "linux":
            return [LinuxGccConfigNode(self, v) for v in LINUX_GCC_CONFIG_VARIANTS[self.find_prop("package_format")]]
        elif self.find_prop("os_name") == "windows" and self.find_prop("package_format") == "libtorch":
            return [WindowsLibtorchConfigNode(self, v) for v in WINDOWS_LIBTORCH_CONFIG_VARIANTS]
        else:
            return [ArchConfigNode(self, v) for v in self.find_prop("gpu_versions")]
            return [ArchConfigNode(self, v) for v in self.find_prop("cuda_versions")]


class LinuxGccConfigNode(ConfigNode):
@@ -146,39 +120,21 @@ class LinuxGccConfigNode(ConfigNode):
        self.props["gcc_config_variant"] = gcc_config_variant

    def get_children(self):
        gpu_versions = self.find_prop("gpu_versions")
        cuda_versions = self.find_prop("cuda_versions")

        # XXX devtoolset7 on CUDA 9.0 is temporarily disabled
        # see https://github.com/pytorch/pytorch/issues/20066
        if self.find_prop("gcc_config_variant") == 'devtoolset7':
            gpu_versions = filter(lambda x: x != "cuda_90", gpu_versions)
            cuda_versions = filter(lambda x: x != "90", cuda_versions)

        # XXX disabling conda rocm build since docker images are not there
        if self.find_prop("package_format") == 'conda':
            gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions)

        # XXX libtorch rocm build is temporarily disabled
        if self.find_prop("package_format") == 'libtorch':
            gpu_versions = filter(lambda x: x not in dimensions.ROCM_VERSION_LABELS, gpu_versions)

        return [ArchConfigNode(self, v) for v in gpu_versions]


class WindowsLibtorchConfigNode(ConfigNode):
    def __init__(self, parent, libtorch_config_variant):
        super(WindowsLibtorchConfigNode, self).__init__(parent, "LIBTORCH_CONFIG_VARIANT=" + str(libtorch_config_variant))

        self.props["libtorch_config_variant"] = libtorch_config_variant

    def get_children(self):
        return [ArchConfigNode(self, v) for v in self.find_prop("gpu_versions")]
        return [ArchConfigNode(self, v) for v in cuda_versions]


class ArchConfigNode(ConfigNode):
    def __init__(self, parent, gpu):
        super(ArchConfigNode, self).__init__(parent, get_processor_arch_name(gpu))
    def __init__(self, parent, cu):
        super(ArchConfigNode, self).__init__(parent, get_processor_arch_name(cu))

        self.props["gpu"] = gpu
        self.props["cu"] = cu

    def get_children(self):
        return [PyVersionConfigNode(self, v) for v in self.find_prop("python_versions")]
@@ -191,6 +147,8 @@ class PyVersionConfigNode(ConfigNode):
        self.props["pyver"] = pyver

    def get_children(self):

        smoke = self.find_prop("smoke")
        package_format = self.find_prop("package_format")
        os_name = self.find_prop("os_name")
@@ -1,45 +1,33 @@
#!/usr/bin/env python3

from collections import OrderedDict

import cimodel.data.simple.util.branch_filters as branch_filters
import cimodel.data.binary_build_data as binary_build_data
import cimodel.lib.conf_tree as conf_tree
import cimodel.lib.miniutils as miniutils
import cimodel.lib.visualization as visualization


class Conf(object):
    def __init__(self, os, gpu_version, pydistro, parms, smoke, libtorch_variant, gcc_config_variant, libtorch_config_variant):
    def __init__(self, os, cuda_version, pydistro, parms, smoke, libtorch_variant, gcc_config_variant):

        self.os = os
        self.gpu_version = gpu_version
        self.cuda_version = cuda_version
        self.pydistro = pydistro
        self.parms = parms
        self.smoke = smoke
        self.libtorch_variant = libtorch_variant
        self.gcc_config_variant = gcc_config_variant
        self.libtorch_config_variant = libtorch_config_variant

    def gen_build_env_parms(self):
        elems = [self.pydistro] + self.parms + [binary_build_data.get_processor_arch_name(self.gpu_version)]
        elems = [self.pydistro] + self.parms + [binary_build_data.get_processor_arch_name(self.cuda_version)]
        if self.gcc_config_variant is not None:
            elems.append(str(self.gcc_config_variant))
        if self.libtorch_config_variant is not None:
            elems.append(str(self.libtorch_config_variant))
        return elems

    def gen_docker_image(self):
        if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
            if self.gpu_version is None:
                return miniutils.quote("pytorch/libtorch-cxx11-builder:cpu")
            else:
                return miniutils.quote(
                    f"pytorch/libtorch-cxx11-builder:{self.gpu_version}"
                )
        if self.pydistro == "conda":
            if self.gpu_version is None:
                return miniutils.quote("pytorch/conda-builder:cpu")
            else:
                return miniutils.quote(
                    f"pytorch/conda-builder:{self.gpu_version}"
                )
            return miniutils.quote("soumith/conda-cuda-cxx11-ubuntu1604:latest")

        docker_word_substitution = {
            "manywheel": "manylinux",
@@ -48,24 +36,18 @@ class Conf(object):

        docker_distro_prefix = miniutils.override(self.pydistro, docker_word_substitution)

        # The cpu nightlies are built on the pytorch/manylinux-cuda102 docker image
        # TODO cuda images should consolidate into tag-base images similar to rocm
        alt_docker_suffix = "cuda102" if not self.gpu_version else (
            "rocm:" + self.gpu_version.strip("rocm") if self.gpu_version.startswith("rocm") else self.gpu_version)
        docker_distro_suffix = alt_docker_suffix if self.pydistro != "conda" else (
            "cuda" if alt_docker_suffix.startswith("cuda") else "rocm")
        return miniutils.quote("pytorch/" + docker_distro_prefix + "-" + docker_distro_suffix)
        # The cpu nightlies are built on the soumith/manylinux-cuda100 docker image
        alt_docker_suffix = self.cuda_version or "100"
        docker_distro_suffix = "" if self.pydistro == "conda" else alt_docker_suffix
        return miniutils.quote("soumith/" + docker_distro_prefix + "-cuda" + docker_distro_suffix)

    def get_name_prefix(self):
        return "smoke" if self.smoke else "binary"

    def gen_build_name(self, build_or_test, nightly):
    def gen_build_name(self, build_or_test):

        parts = [self.get_name_prefix(), self.os] + self.gen_build_env_parms()

        if nightly:
            parts.append("nightly")

        if self.libtorch_variant:
            parts.append(self.libtorch_variant)

@@ -75,86 +57,37 @@ class Conf(object):
        joined = "_".join(parts)
        return joined.replace(".", "_")

    def gen_workflow_job(self, phase, upload_phase_dependency=None, nightly=False):
    def gen_workflow_job(self, phase, upload_phase_dependency=None):
        job_def = OrderedDict()
        job_def["name"] = self.gen_build_name(phase, nightly)
        job_def["name"] = self.gen_build_name(phase)
        job_def["build_environment"] = miniutils.quote(" ".join(self.gen_build_env_parms()))
        if self.smoke:
            job_def["requires"] = [
                "update_s3_htmls",
            ]
            job_def["filters"] = branch_filters.gen_filter_dict(
                branches_list=["postnightly"],
            )
        else:
            filter_branch = r"/.*/"
            job_def["filters"] = branch_filters.gen_filter_dict(
                branches_list=[filter_branch],
                tags_list=[branch_filters.RC_PATTERN],
            )
        job_def["requires"] = ["setup"]
        job_def["filters"] = {"branches": {"only": "nightly"}}
        if self.libtorch_variant:
            job_def["libtorch_variant"] = miniutils.quote(self.libtorch_variant)
        if phase == "test":
            if not self.smoke:
                job_def["requires"] = [self.gen_build_name("build", nightly)]
            if not (self.smoke and self.os == "macos") and self.os != "windows":
                job_def["requires"].append(self.gen_build_name("build"))
            if not (self.smoke and self.os == "macos"):
                job_def["docker_image"] = self.gen_docker_image()

                # fix this. only works on cuda not rocm
                if self.os != "windows" and self.gpu_version:
                if self.cuda_version:
                    job_def["use_cuda_docker_runtime"] = miniutils.quote("1")
        else:
            if self.os == "linux" and phase != "upload":
                job_def["docker_image"] = self.gen_docker_image()

        if phase == "test":
            if self.gpu_version:
                if self.os == "windows":
                    job_def["executor"] = "windows-with-nvidia-gpu"
                else:
                    job_def["resource_class"] = "gpu.medium"
            if self.cuda_version:
                job_def["resource_class"] = "gpu.medium"
        if phase == "upload":
            job_def["context"] = "org-member"
            job_def["requires"] = ["setup", self.gen_build_name(upload_phase_dependency)]

        os_name = miniutils.override(self.os, {"macos": "mac"})
        job_name = "_".join([self.get_name_prefix(), os_name, phase])
        return {job_name : job_def}

    def gen_upload_job(self, phase, requires_dependency):
        """Generate binary_upload job for configuration

        Output looks similar to:

      - binary_upload:
          name: binary_linux_manywheel_3_7m_cu92_devtoolset7_nightly_upload
          context: org-member
          requires: binary_linux_manywheel_3_7m_cu92_devtoolset7_nightly_test
          filters:
            branches:
              only:
                - nightly
            tags:
              only: /v[0-9]+(\\.[0-9]+)*-rc[0-9]+/
          package_type: manywheel
          upload_subfolder: cu92
        """
        return {
            "binary_upload": OrderedDict({
                "name": self.gen_build_name(phase, nightly=True),
                "context": "org-member",
                "requires": [self.gen_build_name(
                    requires_dependency,
                    nightly=True
                )],
                "filters": branch_filters.gen_filter_dict(
                    branches_list=["nightly"],
                    tags_list=[branch_filters.RC_PATTERN],
                ),
                "package_type": self.pydistro,
                "upload_subfolder": binary_build_data.get_processor_arch_name(
                    self.gpu_version,
                ),
            })
        }

def get_root(smoke, name):

    return binary_build_data.TopLevelNode(
@@ -173,71 +106,64 @@ def gen_build_env_list(smoke):
    for c in config_list:
        conf = Conf(
            c.find_prop("os_name"),
            c.find_prop("gpu"),
            c.find_prop("cu"),
            c.find_prop("package_format"),
            [c.find_prop("pyver")],
            c.find_prop("smoke") and not (c.find_prop("os_name") == "macos_arm64"),  # don't test arm64
            c.find_prop("smoke"),
            c.find_prop("libtorch_variant"),
            c.find_prop("gcc_config_variant"),
            c.find_prop("libtorch_config_variant"),
        )
        newlist.append(conf)

    return newlist

def predicate_exclude_macos(config):
    return config.os == "linux" or config.os == "windows"

def predicate_exclude_nonlinux_and_libtorch(config):
    return config.os == "linux"


def get_nightly_uploads():
    configs = gen_build_env_list(False)
    mylist = []
    for conf in configs:
        phase_dependency = "test" if predicate_exclude_macos(conf) else "build"
        mylist.append(conf.gen_upload_job("upload", phase_dependency))
        phase_dependency = "test" if predicate_exclude_nonlinux_and_libtorch(conf) else "build"
        mylist.append(conf.gen_workflow_job("upload", phase_dependency))

    return mylist

def get_post_upload_jobs():
    return [
        {
            "update_s3_htmls": {
                "name": "update_s3_htmls",
                "context": "org-member",
                "filters": branch_filters.gen_filter_dict(
                    branches_list=["postnightly"],
                ),
            },
        },
    ]

def get_nightly_tests():

    configs = gen_build_env_list(False)
    filtered_configs = filter(predicate_exclude_macos, configs)
    filtered_configs = filter(predicate_exclude_nonlinux_and_libtorch, configs)

    tests = []
    for conf_options in filtered_configs:
        yaml_item = conf_options.gen_workflow_job("test", nightly=True)
        yaml_item = conf_options.gen_workflow_job("test")
        tests.append(yaml_item)

    return tests


def get_jobs(toplevel_key, smoke):
    jobs_list = []
def add_jobs_and_render(jobs_dict, toplevel_key, smoke, cron_schedule):

    jobs_list = ["setup"]

    configs = gen_build_env_list(smoke)
    phase = "build" if toplevel_key == "binarybuilds" else "test"
    for build_config in configs:
        # don't test for macos_arm64 as it's cross compiled
        if phase != "test" or build_config.os != "macos_arm64":
            jobs_list.append(build_config.gen_workflow_job(phase, nightly=True))
        jobs_list.append(build_config.gen_workflow_job(phase))

    return jobs_list
    jobs_dict[toplevel_key] = OrderedDict(
        jobs=jobs_list,
    )

    graph = visualization.generate_graph(get_root(smoke, toplevel_key))
    graph.draw(toplevel_key + "-config-dimensions.png", prog="twopi")


def get_binary_build_jobs():
    return get_jobs("binarybuilds", False)
def add_binary_build_jobs(jobs_dict):
    add_jobs_and_render(jobs_dict, "binarybuilds", False, "5 5 * * *")


def get_binary_smoke_test_jobs():
    return get_jobs("binarysmoketests", True)
def add_binary_smoke_test_jobs(jobs_dict):
    add_jobs_and_render(jobs_dict, "binarysmoketests", True, "15 16 * * *")
.circleci/cimodel/data/caffe2_build_data.py (new file, 109 lines)
@@ -0,0 +1,109 @@
#!/usr/bin/env python3

from cimodel.lib.conf_tree import ConfigNode, X, XImportant
from cimodel.lib.conf_tree import Ver


CONFIG_TREE_DATA = [
    (Ver("ubuntu", "14.04"), [
        (Ver("gcc", "4.8"), [X("py2")]),
        (Ver("gcc", "4.9"), [X("py2")]),
    ]),
    (Ver("ubuntu", "16.04"), [
        (Ver("cuda", "9.0"), [
            # TODO make explicit that this is a "secret TensorRT build"
            # (see https://github.com/pytorch/pytorch/pull/17323#discussion_r259446749)
            # TODO Uh oh, were we supposed to make this one important?!
            X("py2"),
            XImportant("cmake"),
        ]),
        (Ver("cuda", "10.1"), [XImportant("py3.5")]),  # TensorRT 6 build
        (Ver("mkl"), [XImportant("py2")]),
        (Ver("gcc", "5"), [XImportant("onnx_py2")]),
        (Ver("clang", "3.8"), [X("py2")]),
        (Ver("clang", "3.9"), [X("py2")]),
        (Ver("clang", "7"), [XImportant("py2"), XImportant("onnx_py3.6")]),
        (Ver("android"), [XImportant("py2")]),
    ]),
    (Ver("centos", "7"), [
        (Ver("cuda", "9.0"), [X("py2")]),
    ]),
    (Ver("macos", "10.13"), [
        # TODO ios and system aren't related. system qualifies where the python comes
        # from (use the system python instead of homebrew or anaconda)
        (Ver("ios"), [X("py2")]),
        (Ver("system"), [XImportant("py2")]),
    ]),
]


class TreeConfigNode(ConfigNode):
    def __init__(self, parent, node_name, subtree):
        super(TreeConfigNode, self).__init__(parent, self.modify_label(node_name))
        self.subtree = subtree
        self.init2(node_name)

    # noinspection PyMethodMayBeStatic
    def modify_label(self, label):
        return str(label)

    def init2(self, node_name):
        pass

    def get_children(self):
        return [self.child_constructor()(self, k, v) for (k, v) in self.subtree]

    def is_build_only(self):
        if str(self.find_prop("language_version")) == "onnx_py3.6":
            return False
        return str(self.find_prop("compiler_version")) in [
            "gcc4.9",
            "clang3.8",
            "clang3.9",
            "clang7",
            "android",
        ] or self.find_prop("distro_version").name == "macos"


class TopLevelNode(TreeConfigNode):
    def __init__(self, node_name, subtree):
        super(TopLevelNode, self).__init__(None, node_name, subtree)

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return DistroConfigNode


class DistroConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["distro_version"] = node_name

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return CompilerConfigNode


class CompilerConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["compiler_version"] = node_name

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return LanguageConfigNode


class LanguageConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["language_version"] = node_name
        self.props["build_only"] = self.is_build_only()

    def child_constructor(self):
        return ImportantConfigNode


class ImportantConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["important"] = True

    def get_children(self):
        return []
.circleci/cimodel/data/caffe2_build_definitions.py (new file, 162 lines)
@@ -0,0 +1,162 @@
#!/usr/bin/env python3

from collections import OrderedDict

import cimodel.data.dimensions as dimensions
import cimodel.lib.conf_tree as conf_tree
from cimodel.lib.conf_tree import Ver
import cimodel.lib.miniutils as miniutils
from cimodel.data.caffe2_build_data import CONFIG_TREE_DATA, TopLevelNode

from dataclasses import dataclass


DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/"

DOCKER_IMAGE_VERSION = 324


@dataclass
class Conf:
    language: str
    distro: Ver
    compiler: Ver
    build_only: bool
    is_important: bool

    # TODO: Eventually we can probably just remove the cudnn7 everywhere.
    def get_cudnn_insertion(self):

        omit = self.language == "onnx_py2" \
            or self.language == "onnx_py3.6" \
            or self.compiler.name in ["android", "mkl", "clang"] \
            or str(self.distro) in ["ubuntu14.04", "macos10.13"]

        return [] if omit else ["cudnn7"]

    def get_build_name_root_parts(self):
        return [
            "caffe2",
            self.language,
        ] + self.get_build_name_middle_parts()

    def get_build_name_middle_parts(self):
        return [str(self.compiler)] + self.get_cudnn_insertion() + [str(self.distro)]

    def construct_phase_name(self, phase):
        root_parts = self.get_build_name_root_parts()
        return "_".join(root_parts + [phase]).replace(".", "_")

    def get_platform(self):
        platform = self.distro.name
        if self.distro.name != "macos":
            platform = "linux"
        return platform

    def gen_docker_image(self):

        lang_substitutions = {
            "onnx_py2": "py2",
            "onnx_py3.6": "py3.6",
            "cmake": "py2",
        }

        lang = miniutils.override(self.language, lang_substitutions)
        parts = [lang] + self.get_build_name_middle_parts()
        return miniutils.quote(DOCKER_IMAGE_PATH_BASE + "-".join(parts) + ":" + str(DOCKER_IMAGE_VERSION))

    def gen_workflow_params(self, phase):
        parameters = OrderedDict()
        lang_substitutions = {
            "onnx_py2": "onnx-py2",
            "onnx_py3.6": "onnx-py3.6",
        }

        lang = miniutils.override(self.language, lang_substitutions)

        parts = [
            "caffe2",
            lang,
        ] + self.get_build_name_middle_parts() + [phase]

        build_env_name = "-".join(parts)
        parameters["build_environment"] = miniutils.quote(build_env_name)
        if self.compiler.name == "ios":
            parameters["build_ios"] = miniutils.quote("1")
        if phase == "test":
            # TODO cuda should not be considered a compiler
            if self.compiler.name == "cuda":
                parameters["use_cuda_docker_runtime"] = miniutils.quote("1")

        if self.distro.name != "macos":
            parameters["docker_image"] = self.gen_docker_image()
            if self.build_only:
                parameters["build_only"] = miniutils.quote("1")
        if phase == "test":
            resource_class = "large" if self.compiler.name != "cuda" else "gpu.medium"
            parameters["resource_class"] = resource_class

        return parameters

    def gen_workflow_job(self, phase):
        job_def = OrderedDict()
        job_def["name"] = self.construct_phase_name(phase)
        job_def["requires"] = ["setup"]

        if phase == "test":
            job_def["requires"].append(self.construct_phase_name("build"))
            job_name = "caffe2_" + self.get_platform() + "_test"
        else:
            job_name = "caffe2_" + self.get_platform() + "_build"

        if not self.is_important:
            job_def["filters"] = {"branches": {"only": ["master", r"/ci-all\/.*/"]}}
        job_def.update(self.gen_workflow_params(phase))
        return {job_name : job_def}


def get_root():
    return TopLevelNode("Caffe2 Builds", CONFIG_TREE_DATA)


def instantiate_configs():

    config_list = []

    root = get_root()
    found_configs = conf_tree.dfs(root)
    for fc in found_configs:

        c = Conf(
            language=fc.find_prop("language_version"),
            distro=fc.find_prop("distro_version"),
            compiler=fc.find_prop("compiler_version"),
            build_only=fc.find_prop("build_only"),
            is_important=fc.find_prop("important"),
        )

        config_list.append(c)

    return config_list


def get_workflow_jobs():

    configs = instantiate_configs()

    # TODO Why don't we build this config?
    # See https://github.com/pytorch/pytorch/pull/17323#discussion_r259450540
    filtered_configs = filter(lambda x: not (str(x.distro) == "ubuntu14.04" and str(x.compiler) == "gcc4.9"), configs)

    x = []
    for conf_options in filtered_configs:

        phases = ["build"]
        if not conf_options.build_only:
            phases = dimensions.PHASES

        for phase in phases:
            x.append(conf_options.gen_workflow_job(phase))

    return x
@@ -1,24 +1,18 @@
#!/usr/bin/env python3


PHASES = ["build", "test"]

CUDA_VERSIONS = [
    "102",
    "111",
    "113",
    None,  # cpu build
    "92",
    "100",
    "101",
]

ROCM_VERSIONS = [
    "4.0.1",
    "4.1",
    "4.2",
]

ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]

GPU_VERSIONS = [None] + ["cuda" + v for v in CUDA_VERSIONS] + ROCM_VERSION_LABELS

STANDARD_PYTHON_VERSIONS = [
    "2.7",
    "3.5",
    "3.6",
    "3.7",
    "3.8",
    "3.9"
]
@@ -1,101 +1,66 @@
#!/usr/bin/env python3

from cimodel.lib.conf_tree import ConfigNode, X, XImportant


CONFIG_TREE_DATA = [
    ("xenial", [
        (None, [
            XImportant("2.7.9"),
            X("2.7"),
            XImportant("3.5"),  # Not run on all PRs, but should be included on [test all]
            X("nightly"),
        ]),
        ("gcc", [
            ("4.8", [X("3.6")]),
            ("5.4", [  # All this subtree rebases to master and then build
                XImportant("3.6"),
                ("3.6", [
                    ("important", [X(True)]),
                    ("parallel_tbb", [X(True)]),
                    ("parallel_native", [X(True)]),
                    ("pure_torch", [X(True)]),
                    ("namedtensor", [XImportant(True)]),
                ]),
            ]),
            # TODO: bring back libtorch test
            ("7", [X("3.6")]),
        ]),
        ("clang", [
            ("5", [
                XImportant("3.6"),  # This is actually the ASAN build
                ("3.6", [
                    ("asan", [
                        (True, [
                            ("shard_test", [XImportant(True)]),
                        ]),
                    ]),
                    ("namedtensor", [XImportant(True)]),  # ASAN
                ]),
            ]),
            ("7", [
                ("3.6", [
                    ("onnx", [XImportant(True)]),
                ]),
            ]),
        ]),
        ("cuda", [
            ("10.2", [
                ("3.6", [
                    ("shard_test", [X(True)]),
                    ("slow_gradcheck", [
                        (True, [
                            ('shard_test', [XImportant(True)]),
                        ]),
                    ]),
                    ("libtorch", [
                        (True, [
                            ('build_only', [X(True)]),
                        ]),
                    ]),
                ]),
            ]),
            ("11.1", [
                ("3.8", [
                    ("shard_test", [XImportant(True)]),
                    ("libtorch", [
                        (True, [
                            ('build_only', [X(True)]),
                        ]),
                    ]),
                ]),
            ]),
        ]),
    ]),
    ("bionic", [
        ("clang", [
            ("9", [
                ("3.6", [
                    ("noarch", [XImportant(True)]),
                ]),
            ]),
            ("9", [
                ("3.6", [
                    ("xla", [XImportant(True)]),
                    ("vulkan", [XImportant(True)]),
                ]),
            ]),
        ]),
        ("cuda", [
            ("10.2", [
                ("3.9", [
                    ("shard_test", [XImportant(True)]),
                ]),
            ]),
        ]),
        ("gcc", [
            ("9", [
                ("3.8", [
                    ("coverage", [
                        (True, [
                            ("shard_test", [XImportant(True)]),
                        ]),
                    ]),
                    # Note there are magic strings here
                    # https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L21
                    # and
                    # https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L143
                    # and
                    # https://github.com/pytorch/pytorch/blob/master/.jenkins/pytorch/build.sh#L153
                    # (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453144)
                    X("2.7"),
                    XImportant("3.6"),
                    ("2.7", [
                        ("namedtensor", [XImportant(True)]),
                    ]),
                ]),
            ]),
            ("9.2", [X("3.6")]),
            ("10", [X("3.6")]),
            ("10.1", [X("3.6")]),
        ]),
        ("rocm", [
            ("3.9", [
                ("android", [
                    ("r19c", [
                        ("3.6", [
                            ('build_only', [XImportant(True)]),
                        ]),
                        ("android_abi", [XImportant("x86_32")]),
                        ("android_abi", [X("x86_64")]),
                        ("android_abi", [X("arm-v7a")]),
                        ("android_abi", [X("arm-v8a")]),
                    ])
                ]),
            ]),
        ]),
@@ -141,7 +106,6 @@ class DistroConfigNode(TreeConfigNode):

        next_nodes = {
            "xenial": XenialCompilerConfigNode,
            "bionic": BionicCompilerConfigNode,
        }
        return next_nodes[distro]

@@ -150,8 +114,6 @@ class PyVerConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["pyver"] = node_name
        self.props["abbreviated_pyver"] = get_major_pyver(node_name)
        if node_name == "3.9":
            self.props["abbreviated_pyver"] = "py3.9"

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
@@ -166,44 +128,14 @@ class ExperimentalFeatureConfigNode(TreeConfigNode):
        experimental_feature = self.find_prop("experimental_feature")

        next_nodes = {
            "asan": AsanConfigNode,
            "xla": XlaConfigNode,
            "mlc": MLCConfigNode,
            "vulkan": VulkanConfigNode,
            "parallel_tbb": ParallelTBBConfigNode,
            "noarch": NoarchConfigNode,
            "parallel_native": ParallelNativeConfigNode,
            "onnx": ONNXConfigNode,
            "libtorch": LibTorchConfigNode,
            "namedtensor": NamedTensorConfigNode,
            "important": ImportantConfigNode,
            "build_only": BuildOnlyConfigNode,
            "shard_test": ShardTestConfigNode,
            "cuda_gcc_override": CudaGccOverrideConfigNode,
            "coverage": CoverageConfigNode,
            "pure_torch": PureTorchConfigNode,
            "slow_gradcheck": SlowGradcheckConfigNode,
            "android_abi": AndroidAbiConfigNode,
        }
        return next_nodes[experimental_feature]


class SlowGradcheckConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["is_slow_gradcheck"] = True

    def child_constructor(self):
        return ExperimentalFeatureConfigNode

class PureTorchConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "PURE_TORCH=" + str(label)

    def init2(self, node_name):
        self.props["is_pure_torch"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class XlaConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "XLA=" + str(label)
@@ -214,123 +146,25 @@ class XlaConfigNode(TreeConfigNode):
    def child_constructor(self):
        return ImportantConfigNode

class MLCConfigNode(TreeConfigNode):

class NamedTensorConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "MLC=" + str(label)
        return "NAMEDTENSOR=" + str(label)

    def init2(self, node_name):
        self.props["is_mlc"] = node_name
        self.props["is_namedtensor"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class AsanConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "Asan=" + str(label)
class AndroidAbiConfigNode(TreeConfigNode):

    def init2(self, node_name):
        self.props["is_asan"] = node_name

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class ONNXConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "Onnx=" + str(label)

    def init2(self, node_name):
        self.props["is_onnx"] = node_name
        self.props["android_abi"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class VulkanConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "Vulkan=" + str(label)

    def init2(self, node_name):
        self.props["is_vulkan"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class ParallelTBBConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "PARALLELTBB=" + str(label)

    def init2(self, node_name):
        self.props["parallel_backend"] = "paralleltbb"

    def child_constructor(self):
        return ImportantConfigNode


class NoarchConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["is_noarch"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class ParallelNativeConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "PARALLELNATIVE=" + str(label)

    def init2(self, node_name):
        self.props["parallel_backend"] = "parallelnative"

    def child_constructor(self):
        return ImportantConfigNode


class LibTorchConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "BUILD_TEST_LIBTORCH=" + str(label)

    def init2(self, node_name):
        self.props["is_libtorch"] = node_name

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class CudaGccOverrideConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["cuda_gcc_override"] = node_name

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class BuildOnlyConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["build_only"] = node_name

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class ShardTestConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["shard_test"] = node_name

    def child_constructor(self):
        return ImportantConfigNode


class CoverageConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["is_coverage"] = node_name

    def child_constructor(self):
        return ExperimentalFeatureConfigNode


class ImportantConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return "IMPORTANT=" + str(label)
@@ -343,6 +177,7 @@ class ImportantConfigNode(TreeConfigNode):


class XenialCompilerConfigNode(TreeConfigNode):

    def modify_label(self, label):
        return label or "<unspecified>"

@@ -355,19 +190,6 @@ class XenialCompilerConfigNode(TreeConfigNode):
        return XenialCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode


class BionicCompilerConfigNode(TreeConfigNode):
    def modify_label(self, label):
        return label or "<unspecified>"

    def init2(self, node_name):
        self.props["compiler_name"] = node_name

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return BionicCompilerVersionConfigNode if self.props["compiler_name"] else PyVerConfigNode


class XenialCompilerVersionConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["compiler_version"] = node_name
@@ -375,12 +197,3 @@ class XenialCompilerVersionConfigNode(TreeConfigNode):
    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return PyVerConfigNode


class BionicCompilerVersionConfigNode(TreeConfigNode):
    def init2(self, node_name):
        self.props["compiler_version"] = node_name

    # noinspection PyMethodMayBeStatic
    def child_constructor(self):
        return PyVerConfigNode
@ -1,13 +1,21 @@
|
||||
from collections import OrderedDict
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
from cimodel.data.pytorch_build_data import TopLevelNode, CONFIG_TREE_DATA
|
||||
import cimodel.data.dimensions as dimensions
|
||||
import cimodel.lib.conf_tree as conf_tree
|
||||
import cimodel.lib.miniutils as miniutils
|
||||
from cimodel.data.pytorch_build_data import CONFIG_TREE_DATA, TopLevelNode
|
||||
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN
|
||||
from cimodel.data.simple.util.docker_constants import gen_docker_image
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/"
|
||||
|
||||
# ARE YOU EDITING THIS NUMBER? MAKE SURE YOU READ THE GUIDANCE AT THE
|
||||
# TOP OF .circleci/config.yml
|
||||
DOCKER_IMAGE_VERSION = 347
|
||||
|
||||
|
||||
@dataclass
|
||||
@ -17,24 +25,16 @@ class Conf:
|
||||
parms_list_ignored_for_docker_image: Optional[List[str]] = None
|
||||
pyver: Optional[str] = None
|
||||
cuda_version: Optional[str] = None
|
||||
rocm_version: Optional[str] = None
|
||||
# TODO expand this to cover all the USE_* that we want to test for
|
||||
# tesnrorrt, leveldb, lmdb, redis, opencv, mkldnn, ideep, etc.
|
||||
# (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259453608)
|
||||
is_xla: bool = False
|
||||
is_vulkan: bool = False
|
||||
is_pure_torch: bool = False
|
||||
restrict_phases: Optional[List[str]] = None
|
||||
gpu_resource: Optional[str] = None
|
||||
dependent_tests: List = field(default_factory=list)
|
||||
parent_build: Optional["Conf"] = None
|
||||
is_libtorch: bool = False
|
||||
parent_build: Optional['Conf'] = None
|
||||
is_namedtensor: bool = False
|
||||
is_important: bool = False
|
||||
parallel_backend: Optional[str] = None
|
||||
|
||||
@staticmethod
|
||||
def is_test_phase(phase):
|
||||
return "test" in phase
|
||||
|
||||
# TODO: Eliminate the special casing for docker paths
|
||||
# In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch
|
||||
@ -47,47 +47,29 @@ class Conf:
|
||||
leading.append("pytorch")
|
||||
if self.is_xla and not for_docker:
|
||||
leading.append("xla")
|
||||
if self.is_vulkan and not for_docker:
|
||||
leading.append("vulkan")
|
||||
if self.is_libtorch and not for_docker:
|
||||
leading.append("libtorch")
|
||||
if self.is_pure_torch and not for_docker:
|
||||
leading.append("pure_torch")
|
||||
if self.parallel_backend is not None and not for_docker:
|
||||
leading.append(self.parallel_backend)
|
||||
if self.is_namedtensor and not for_docker:
|
||||
leading.append("namedtensor")
|
||||
|
||||
cuda_parms = []
|
||||
if self.cuda_version:
|
||||
cudnn = "cudnn8" if self.cuda_version.startswith("11.") else "cudnn7"
|
||||
cuda_parms.extend(["cuda" + self.cuda_version, cudnn])
|
||||
if self.rocm_version:
|
||||
cuda_parms.extend([f"rocm{self.rocm_version}"])
|
||||
cuda_parms.extend(["cuda" + self.cuda_version, "cudnn7"])
|
||||
result = leading + ["linux", self.distro] + cuda_parms + self.parms
|
||||
if not for_docker and self.parms_list_ignored_for_docker_image is not None:
|
||||
if (not for_docker and self.parms_list_ignored_for_docker_image is not None):
|
||||
result = result + self.parms_list_ignored_for_docker_image
|
||||
return result
|
||||
|
||||
def gen_docker_image_path(self):
|
||||
parms_source = self.parent_build or self
|
||||
base_build_env_name = "-".join(parms_source.get_parms(True))
|
||||
image_name, _ = gen_docker_image(base_build_env_name)
|
||||
return miniutils.quote(image_name)
|
||||
|
||||
def gen_docker_image_requires(self):
|
||||
parms_source = self.parent_build or self
|
||||
base_build_env_name = "-".join(parms_source.get_parms(True))
|
||||
_, requires = gen_docker_image(base_build_env_name)
|
||||
return miniutils.quote(requires)
|
||||
|
||||
return miniutils.quote(DOCKER_IMAGE_PATH_BASE + base_build_env_name + ":" + str(DOCKER_IMAGE_VERSION))
|
||||
|
||||
def get_build_job_name_pieces(self, build_or_test):
|
||||
return self.get_parms(False) + [build_or_test]
|
||||
|
||||
def gen_build_name(self, build_or_test):
|
||||
return (
|
||||
("_".join(map(str, self.get_build_job_name_pieces(build_or_test))))
|
||||
.replace(".", "_")
|
||||
.replace("-", "_")
|
||||
)
|
||||
return ("_".join(map(str, self.get_build_job_name_pieces(build_or_test)))).replace(".", "_").replace("-", "_")
|
||||
|
||||
def get_dependents(self):
|
||||
return self.dependent_tests or []
|
||||
@ -99,26 +81,22 @@ class Conf:
|
||||
build_env_name = "-".join(map(str, build_job_name_pieces))
|
||||
parameters["build_environment"] = miniutils.quote(build_env_name)
|
||||
parameters["docker_image"] = self.gen_docker_image_path()
|
||||
if Conf.is_test_phase(phase) and self.gpu_resource:
|
||||
if phase == "test" and self.gpu_resource:
|
||||
parameters["use_cuda_docker_runtime"] = miniutils.quote("1")
|
||||
if Conf.is_test_phase(phase):
|
||||
if phase == "test":
|
||||
resource_class = "large"
|
||||
if self.gpu_resource:
|
||||
resource_class = "gpu." + self.gpu_resource
|
||||
if self.rocm_version is not None:
|
||||
resource_class = "pytorch/amd-gpu"
|
||||
parameters["resource_class"] = resource_class
|
||||
if phase == "build" and self.rocm_version is not None:
|
||||
parameters["resource_class"] = "xlarge"
|
||||
if hasattr(self, 'filters'):
|
||||
parameters['filters'] = self.filters
|
||||
return parameters
|
||||
|
||||
def gen_workflow_job(self, phase):
|
||||
# All jobs require the setup job
|
||||
job_def = OrderedDict()
|
||||
job_def["name"] = self.gen_build_name(phase)
|
||||
job_def["requires"] = ["setup"]
|
||||
|
||||
if Conf.is_test_phase(phase):
|
||||
if phase == "test":
|
||||
|
||||
# TODO When merging the caffe2 and pytorch jobs, it might be convenient for a while to make a
|
||||
# caffe2 test job dependent on a pytorch build job. This way we could quickly dedup the repeated
|
||||
@ -126,63 +104,43 @@ class Conf:
|
||||
            # pytorch build job (from https://github.com/pytorch/pytorch/pull/17323#discussion_r259452641)
            dependency_build = self.parent_build or self
            job_def["requires"] = [dependency_build.gen_build_name("build")]
            job_def["requires"].append(dependency_build.gen_build_name("build"))
            job_name = "pytorch_linux_test"
        else:
            job_name = "pytorch_linux_build"
            job_def["requires"] = [self.gen_docker_image_requires()]

        if not self.is_important:
            job_def["filters"] = gen_filter_dict()
            # If you update this, update
            # caffe2_build_definitions.py too
            job_def["filters"] = {"branches": {"only": ["master", r"/ci-all\/.*/"]}}
        job_def.update(self.gen_workflow_params(phase))

        return {job_name: job_def}
        return {job_name : job_def}


# TODO This is a hack to special case some configs just for the workflow list
class HiddenConf(object):
    def __init__(self, name, parent_build=None, filters=None):
    def __init__(self, name, parent_build=None):
        self.name = name
        self.parent_build = parent_build
        self.filters = filters

    def gen_workflow_job(self, phase):
        return {
            self.gen_build_name(phase): {
                "requires": [self.parent_build.gen_build_name("build")],
                "filters": self.filters,
            }
        }
        return {self.gen_build_name(phase): {"requires": [self.parent_build.gen_build_name("build")]}}

    def gen_build_name(self, _):
        return self.name


class DocPushConf(object):
    def __init__(self, name, parent_build=None, branch="master"):
        self.name = name
        self.parent_build = parent_build
        self.branch = branch

    def gen_workflow_job(self, phase):
        return {
            "pytorch_doc_push": {
                "name": self.name,
                "branch": self.branch,
                "requires": [self.parent_build],
                "context": "org-member",
                "filters": gen_filter_dict(branches_list=["nightly"],
                                           tags_list=RC_PATTERN)
            }
        }


# TODO Convert these to graph nodes
def gen_dependent_configs(xenial_parent_config):

    extra_parms = [
        (["multigpu"], "large"),
        (["nogpu", "NO_AVX2"], None),
        (["nogpu", "NO_AVX"], None),
        (["NO_AVX2"], "medium"),
        (["NO_AVX", "NO_AVX2"], "medium"),
        (["slow"], "medium"),
        (["nogpu"], None),
    ]

    configs = []

@@ -191,60 +149,19 @@ def gen_dependent_configs(xenial_parent_config):
        c = Conf(
            xenial_parent_config.distro,
            ["py3"] + parms,
            pyver=xenial_parent_config.pyver,
            pyver="3.6",
            cuda_version=xenial_parent_config.cuda_version,
            restrict_phases=["test"],
            gpu_resource=gpu,
            parent_build=xenial_parent_config,
            is_important=False,
            is_important=xenial_parent_config.is_important,
        )

        configs.append(c)

    return configs
    for x in ["pytorch_short_perf_test_gpu", "pytorch_python_doc_push", "pytorch_cpp_doc_push"]:
        configs.append(HiddenConf(x, parent_build=xenial_parent_config))


def gen_docs_configs(xenial_parent_config):
    configs = []

    configs.append(
        HiddenConf(
            "pytorch_python_doc_build",
            parent_build=xenial_parent_config,
            filters=gen_filter_dict(branches_list=r"/.*/",
                                    tags_list=RC_PATTERN),
        )
    )
    configs.append(
        DocPushConf(
            "pytorch_python_doc_push",
            parent_build="pytorch_python_doc_build",
            branch="site",
        )
    )

    configs.append(
        HiddenConf(
            "pytorch_cpp_doc_build",
            parent_build=xenial_parent_config,
            filters=gen_filter_dict(branches_list=r"/.*/",
                                    tags_list=RC_PATTERN),
        )
    )
    configs.append(
        DocPushConf(
            "pytorch_cpp_doc_push",
            parent_build="pytorch_cpp_doc_build",
            branch="master",
        )
    )

    configs.append(
        HiddenConf(
            "pytorch_doc_test",
            parent_build=xenial_parent_config
        )
    )
    return configs
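For orientation, here is a hand-worked sketch of what one of the doc-build entries above renders to. The `HiddenConf` and `gen_filter_dict` stand-ins below are simplified from the versions in this diff (in the real class, `parent_build` is a `Conf` object rather than a string, and the parent job name here is hypothetical), so treat this as illustrative only:

```python
RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"

def gen_filter_dict(branches_list, tags_list=None):
    # Simplified stand-in for the helper defined in branch_filters.py below.
    d = {"branches": {"only": branches_list}}
    if tags_list is not None:
        d["tags"] = {"only": tags_list}
    return d

class HiddenConf:
    def __init__(self, name, parent_build=None, filters=None):
        self.name = name
        self.parent_build = parent_build  # a string here; a Conf object in the real code
        self.filters = filters

    def gen_workflow_job(self, phase):
        return {self.name: {
            "requires": [self.parent_build + "_build"],
            "filters": self.filters,
        }}

doc_build = HiddenConf(
    "pytorch_python_doc_build",
    parent_build="pytorch_linux_xenial_py3_6_gcc5_4",  # hypothetical parent job name
    filters=gen_filter_dict(branches_list=r"/.*/", tags_list=RC_PATTERN),
)
print(doc_build.gen_workflow_job("build"))
```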
@@ -258,31 +175,21 @@ def gen_tree():
    return configs_list


def instantiate_configs(only_slow_gradcheck):
def instantiate_configs():

    config_list = []

    root = get_root()
    found_configs = conf_tree.dfs(root)
    restrict_phases = None
    for fc in found_configs:

        restrict_phases = None
        distro_name = fc.find_prop("distro_name")
        compiler_name = fc.find_prop("compiler_name")
        compiler_version = fc.find_prop("compiler_version")
        is_xla = fc.find_prop("is_xla") or False
        is_asan = fc.find_prop("is_asan") or False
        is_coverage = fc.find_prop("is_coverage") or False
        is_noarch = fc.find_prop("is_noarch") or False
        is_onnx = fc.find_prop("is_onnx") or False
        is_pure_torch = fc.find_prop("is_pure_torch") or False
        is_vulkan = fc.find_prop("is_vulkan") or False
        is_slow_gradcheck = fc.find_prop("is_slow_gradcheck") or False
        parms_list_ignored_for_docker_image = []

        if only_slow_gradcheck ^ is_slow_gradcheck:
            continue

        python_version = None
        if compiler_name == "cuda" or compiler_name == "android":
            python_version = fc.find_prop("pyver")
@@ -291,14 +198,9 @@ def instantiate_configs(only_slow_gradcheck):
            parms_list = ["py" + fc.find_prop("pyver")]

        cuda_version = None
        rocm_version = None
        if compiler_name == "cuda":
            cuda_version = fc.find_prop("compiler_version")

        elif compiler_name == "rocm":
            rocm_version = fc.find_prop("compiler_version")
            restrict_phases = ["build", "test1", "test2", "caffe2_test"]

        elif compiler_name == "android":
            android_ndk_version = fc.find_prop("compiler_version")
            # TODO: do we need clang to compile host binaries like protoc?
@@ -312,43 +214,18 @@ def instantiate_configs(only_slow_gradcheck):
            gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
            parms_list.append(gcc_version)

            if is_asan:
                parms_list.append("asan")
                python_version = fc.find_prop("pyver")
                parms_list[0] = fc.find_prop("abbreviated_pyver")
            # TODO: This is a nasty special case
            if compiler_name == "clang" and not is_xla:
                parms_list.append("asan")
                python_version = fc.find_prop("pyver")
                parms_list[0] = fc.find_prop("abbreviated_pyver")

        if is_coverage:
            parms_list_ignored_for_docker_image.append("coverage")
            python_version = fc.find_prop("pyver")
            if cuda_version in ["9.2", "10", "10.1"]:
                # TODO The gcc version is orthogonal to CUDA version?
                parms_list.append("gcc7")

        if is_noarch:
            parms_list_ignored_for_docker_image.append("noarch")

        if is_onnx:
            parms_list.append("onnx")
            python_version = fc.find_prop("pyver")
            parms_list[0] = fc.find_prop("abbreviated_pyver")
            restrict_phases = ["build", "ort_test1", "ort_test2"]

        if cuda_version:
            cuda_gcc_version = fc.find_prop("cuda_gcc_override") or "gcc7"
            parms_list.append(cuda_gcc_version)

        is_libtorch = fc.find_prop("is_libtorch") or False
        is_namedtensor = fc.find_prop("is_namedtensor") or False
        is_important = fc.find_prop("is_important") or False
        parallel_backend = fc.find_prop("parallel_backend") or None
        build_only = fc.find_prop("build_only") or False
        shard_test = fc.find_prop("shard_test") or False
        # TODO: fix pure_torch python test packaging issue.
        if shard_test:
            restrict_phases = ["build"] if restrict_phases is None else restrict_phases
            restrict_phases.extend(["test1", "test2"])
        if build_only or is_pure_torch:
            restrict_phases = ["build"]

        if is_slow_gradcheck:
            parms_list_ignored_for_docker_image.append("old")
            parms_list_ignored_for_docker_image.append("gradcheck")

        gpu_resource = None
        if cuda_version and cuda_version != "10":
@@ -360,52 +237,25 @@ def instantiate_configs(only_slow_gradcheck):
            parms_list_ignored_for_docker_image,
            python_version,
            cuda_version,
            rocm_version,
            is_xla,
            is_vulkan,
            is_pure_torch,
            restrict_phases,
            gpu_resource,
            is_libtorch=is_libtorch,
            is_namedtensor=is_namedtensor,
            is_important=is_important,
            parallel_backend=parallel_backend,
        )

        # run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
        # should run on a CPU-only build that runs on all PRs.
        # XXX should this be updated to a more modern build? Projects are
        # beginning to drop python3.6
        if (
            distro_name == "xenial"
            and fc.find_prop("pyver") == "3.6"
            and cuda_version is None
            and parallel_backend is None
            and not is_vulkan
            and not is_pure_torch
            and compiler_name == "gcc"
            and fc.find_prop("compiler_version") == "5.4"
        ):
            c.filters = gen_filter_dict(branches_list=r"/.*/",
                                        tags_list=RC_PATTERN)
            c.dependent_tests = gen_docs_configs(c)

        if cuda_version == "10.2" and python_version == "3.6" and not is_libtorch and not is_slow_gradcheck:
        if cuda_version == "9" and python_version == "3.6":
            c.dependent_tests = gen_dependent_configs(c)

        if (
            compiler_name == "gcc"
            and compiler_version == "5.4"
            and not is_libtorch
            and not is_vulkan
            and not is_pure_torch
            and parallel_backend is None
        ):
        if (compiler_name == "gcc"
                and compiler_version == "5.4"
                and not is_namedtensor):
            bc_breaking_check = Conf(
                "backward-compatibility-check",
                [],
                is_xla=False,
                restrict_phases=["test"],
                is_libtorch=False,
                is_namedtensor=False,
                is_important=True,
                parent_build=c,
            )
@@ -416,11 +266,11 @@ def instantiate_configs(only_slow_gradcheck):
    return config_list


def get_workflow_jobs(only_slow_gradcheck=False):
def get_workflow_jobs():

    config_list = instantiate_configs(only_slow_gradcheck)
    config_list = instantiate_configs()

    x = []
    x = ["setup"]
    for conf_options in config_list:

        phases = conf_options.restrict_phases or dimensions.PHASES
@@ -428,7 +278,7 @@ def get_workflow_jobs(only_slow_gradcheck=False):
        for phase in phases:

            # TODO why does this not have a test?
            if Conf.is_test_phase(phase) and conf_options.cuda_version == "10":
            if phase == "test" and conf_options.cuda_version == "10":
                continue

            x.append(conf_options.gen_workflow_job(phase))
@@ -1,28 +0,0 @@
from collections import OrderedDict

from cimodel.data.simple.util.branch_filters import gen_filter_dict
from cimodel.lib.miniutils import quote


CHANNELS_TO_PRUNE = ["pytorch-nightly", "pytorch-test"]
PACKAGES_TO_PRUNE = "pytorch torchvision torchaudio torchtext ignite torchcsprng"


def gen_workflow_job(channel: str):
    return OrderedDict(
        {
            "anaconda_prune": OrderedDict(
                {
                    "name": f"anaconda-prune-{channel}",
                    "context": quote("org-member"),
                    "packages": quote(PACKAGES_TO_PRUNE),
                    "channel": channel,
                    "filters": gen_filter_dict(branches_list=["postnightly"]),
                }
            )
        }
    )


def get_workflow_jobs():
    return [gen_workflow_job(channel) for channel in CHANNELS_TO_PRUNE]
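As a sanity check on the generator above, this is roughly the structure `gen_workflow_job("pytorch-nightly")` returns, hand-evaluated from the source (the quoting follows `miniutils.quote`, which wraps its argument in double quotes):

```python
# Hand-evaluated sketch of gen_workflow_job("pytorch-nightly") from the module above.
expected = {
    "anaconda_prune": {
        "name": "anaconda-prune-pytorch-nightly",
        "context": '"org-member"',
        "packages": '"pytorch torchvision torchaudio torchtext ignite torchcsprng"',
        "channel": "pytorch-nightly",
        "filters": {"branches": {"only": ["postnightly"]}},
    }
}
```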
@@ -1,119 +0,0 @@
import cimodel.data.simple.util.branch_filters as branch_filters
from cimodel.data.simple.util.docker_constants import (
    DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK
)
import cimodel.lib.miniutils as miniutils


class AndroidJob:
    def __init__(self,
                 variant,
                 template_name,
                 is_master_only=True):

        self.variant = variant
        self.template_name = template_name
        self.is_master_only = is_master_only

    def gen_tree(self):

        base_name_parts = [
            "pytorch",
            "linux",
            "xenial",
            "py3",
            "clang5",
            "android",
            "ndk",
            "r19c",
        ] + self.variant + [
            "build",
        ]

        full_job_name = "_".join(base_name_parts)
        build_env_name = "-".join(base_name_parts)

        props_dict = {
            "name": full_job_name,
            "build_environment": "\"{}\"".format(build_env_name),
            "docker_image": "\"{}\"".format(DOCKER_IMAGE_NDK),
            "requires": [DOCKER_REQUIREMENT_NDK]
        }

        if self.is_master_only:
            props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)

        return [{self.template_name: props_dict}]


class AndroidGradleJob:
    def __init__(self,
                 job_name,
                 template_name,
                 dependencies,
                 is_master_only=True,
                 is_pr_only=False,
                 extra_props=tuple()):

        self.job_name = job_name
        self.template_name = template_name
        self.dependencies = dependencies
        self.is_master_only = is_master_only
        self.is_pr_only = is_pr_only
        self.extra_props = dict(extra_props)

    def gen_tree(self):

        props_dict = {
            "name": self.job_name,
            "requires": self.dependencies,
        }

        if self.is_master_only:
            props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
        elif self.is_pr_only:
            props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.PR_BRANCH_LIST)
        if self.extra_props:
            props_dict.update(self.extra_props)

        return [{self.template_name: props_dict}]


WORKFLOW_DATA = [
    AndroidJob(["x86_32"], "pytorch_linux_build", is_master_only=False),
    AndroidJob(["x86_64"], "pytorch_linux_build"),
    AndroidJob(["arm", "v7a"], "pytorch_linux_build"),
    AndroidJob(["arm", "v8a"], "pytorch_linux_build"),
    AndroidGradleJob(
        "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build-x86_32",
        "pytorch_android_gradle_build-x86_32",
        ["pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build"],
        is_master_only=False,
        is_pr_only=True),
    AndroidGradleJob(
        "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single",
        "pytorch_android_gradle_custom_build_single",
        [DOCKER_REQUIREMENT_NDK],
        is_master_only=False,
        is_pr_only=True),
    AndroidGradleJob(
        "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single-full-jit",
        "pytorch_android_gradle_custom_build_single",
        [DOCKER_REQUIREMENT_NDK],
        is_master_only=False,
        is_pr_only=True,
        extra_props=tuple({
            "lite_interpreter": miniutils.quote(str(int(False)))
        }.items())),
    AndroidGradleJob(
        "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build",
        "pytorch_android_gradle_build",
        ["pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build",
         "pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build",
         "pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build",
         "pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build"]),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
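The job and build-environment names above are derived purely by joining the parts list; a hand-worked example for the `["arm", "v7a"]` variant:

```python
# Hand-worked example of the name/build_environment derivation in AndroidJob.gen_tree()
# for the ["arm", "v7a"] variant (a sketch mirroring the joins above).
base = (["pytorch", "linux", "xenial", "py3", "clang5", "android", "ndk", "r19c"]
        + ["arm", "v7a"] + ["build"])
print("_".join(base))  # pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build
print("-".join(base))  # pytorch-linux-xenial-py3-clang5-android-ndk-r19c-arm-v7a-build
```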
@@ -1,69 +0,0 @@
from cimodel.data.simple.util.docker_constants import (
    DOCKER_IMAGE_GCC7,
    DOCKER_REQUIREMENT_GCC7
)


def gen_job_name(phase):
    job_name_parts = [
        "pytorch",
        "bazel",
        phase,
    ]

    return "_".join(job_name_parts)


class BazelJob:
    def __init__(self, phase, extra_props=None):
        self.phase = phase
        self.extra_props = extra_props or {}

    def gen_tree(self):

        template_parts = [
            "pytorch",
            "linux",
            "bazel",
            self.phase,
        ]

        build_env_parts = [
            "pytorch",
            "linux",
            "xenial",
            "py3.6",
            "gcc7",
            "bazel",
            self.phase,
        ]

        full_job_name = gen_job_name(self.phase)
        build_env_name = "-".join(build_env_parts)

        extra_requires = (
            [gen_job_name("build")] if self.phase == "test" else
            [DOCKER_REQUIREMENT_GCC7]
        )

        props_dict = {
            "build_environment": build_env_name,
            "docker_image": DOCKER_IMAGE_GCC7,
            "name": full_job_name,
            "requires": extra_requires,
        }

        props_dict.update(self.extra_props)

        template_name = "_".join(template_parts)
        return [{template_name: props_dict}]


WORKFLOW_DATA = [
    BazelJob("build", {"resource_class": "large"}),
    BazelJob("test"),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
@@ -1,193 +0,0 @@
"""
TODO: Refactor circleci/cimodel/data/binary_build_data.py to generate this file
instead of doing one-offs here
Binary builds (subset, to smoke test that they'll work)

NB: If you modify this file, you need to also modify
the binary_and_smoke_tests_on_pr variable in
pytorch-ci-hud to adjust the allowed build list
at https://github.com/ezyang/pytorch-ci-hud/blob/master/src/BuildHistoryDisplay.js

Note:
This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710
 - binary_linux_conda_3_6_cu90_devtoolset7_build
 - binary_linux_conda_3_6_cu90_devtoolset7_test

TODO
we should test a libtorch cuda build, but they take too long
 - binary_linux_libtorch_3_6m_cu90_devtoolset7_static-without-deps_build
"""

import cimodel.lib.miniutils as miniutils
import cimodel.data.simple.util.branch_filters


class SmoketestJob:
    def __init__(self,
                 template_name,
                 build_env_parts,
                 docker_image,
                 job_name,
                 is_master_only=False,
                 requires=None,
                 has_libtorch_variant=False,
                 extra_props=None):

        self.template_name = template_name
        self.build_env_parts = build_env_parts
        self.docker_image = docker_image
        self.job_name = job_name
        self.is_master_only = is_master_only
        self.requires = requires or []
        self.has_libtorch_variant = has_libtorch_variant
        self.extra_props = extra_props or {}

    def gen_tree(self):

        props_dict = {
            "build_environment": " ".join(self.build_env_parts),
            "name": self.job_name,
            "requires": self.requires,
        }

        if self.docker_image:
            props_dict["docker_image"] = self.docker_image

        if self.is_master_only:
            props_dict["filters"] = cimodel.data.simple.util.branch_filters.gen_filter_dict()

        if self.has_libtorch_variant:
            props_dict["libtorch_variant"] = "shared-with-deps"

        props_dict.update(self.extra_props)

        return [{self.template_name: props_dict}]


WORKFLOW_DATA = [
    SmoketestJob(
        "binary_linux_build",
        ["manywheel", "3.7m", "cu102", "devtoolset7"],
        "pytorch/manylinux-cuda102",
        "binary_linux_manywheel_3_7m_cu102_devtoolset7_build",
        is_master_only=True,
    ),
    SmoketestJob(
        "binary_linux_build",
        ["libtorch", "3.7m", "cpu", "devtoolset7"],
        "pytorch/manylinux-cuda102",
        "binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build",
        is_master_only=True,
        has_libtorch_variant=True,
    ),
    SmoketestJob(
        "binary_linux_build",
        ["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"],
        "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest",
        "binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build",
        is_master_only=False,
        has_libtorch_variant=True,
    ),
    SmoketestJob(
        "binary_mac_build",
        ["wheel", "3.7", "cpu"],
        None,
        "binary_macos_wheel_3_7_cpu_build",
        is_master_only=True,
    ),
    # This job has an average run time of 3 hours o.O
    # Now only running this on master to reduce overhead
    SmoketestJob(
        "binary_mac_build",
        ["libtorch", "3.7", "cpu"],
        None,
        "binary_macos_libtorch_3_7_cpu_build",
        is_master_only=True,
    ),
    SmoketestJob(
        "binary_windows_build",
        ["libtorch", "3.7", "cpu", "debug"],
        None,
        "binary_windows_libtorch_3_7_cpu_debug_build",
        is_master_only=True,
    ),
    SmoketestJob(
        "binary_windows_build",
        ["libtorch", "3.7", "cpu", "release"],
        None,
        "binary_windows_libtorch_3_7_cpu_release_build",
        is_master_only=True,
    ),
    SmoketestJob(
        "binary_windows_build",
        ["wheel", "3.7", "cu102"],
        None,
        "binary_windows_wheel_3_7_cu102_build",
        is_master_only=True,
    ),

    SmoketestJob(
        "binary_windows_test",
        ["libtorch", "3.7", "cpu", "debug"],
        None,
        "binary_windows_libtorch_3_7_cpu_debug_test",
        is_master_only=True,
        requires=["binary_windows_libtorch_3_7_cpu_debug_build"],
    ),
    SmoketestJob(
        "binary_windows_test",
        ["libtorch", "3.7", "cpu", "release"],
        None,
        "binary_windows_libtorch_3_7_cpu_release_test",
        is_master_only=False,
        requires=["binary_windows_libtorch_3_7_cpu_release_build"],
    ),
    SmoketestJob(
        "binary_windows_test",
        ["wheel", "3.7", "cu102"],
        None,
        "binary_windows_wheel_3_7_cu102_test",
        is_master_only=True,
        requires=["binary_windows_wheel_3_7_cu102_build"],
        extra_props={
            "executor": "windows-with-nvidia-gpu",
        },
    ),

    SmoketestJob(
        "binary_linux_test",
        ["manywheel", "3.7m", "cu102", "devtoolset7"],
        "pytorch/manylinux-cuda102",
        "binary_linux_manywheel_3_7m_cu102_devtoolset7_test",
        is_master_only=True,
        requires=["binary_linux_manywheel_3_7m_cu102_devtoolset7_build"],
        extra_props={
            "resource_class": "gpu.medium",
            "use_cuda_docker_runtime": miniutils.quote((str(1))),
        },
    ),
    SmoketestJob(
        "binary_linux_test",
        ["libtorch", "3.7m", "cpu", "devtoolset7"],
        "pytorch/manylinux-cuda102",
        "binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_test",
        is_master_only=True,
        requires=["binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build"],
        has_libtorch_variant=True,
    ),
    SmoketestJob(
        "binary_linux_test",
        ["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"],
        "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest",
        "binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test",
        is_master_only=True,
        requires=["binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build"],
        has_libtorch_variant=True,
    ),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
@@ -1,52 +0,0 @@
from collections import OrderedDict

from cimodel.lib.miniutils import quote
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN


# TODO: make this generated from a matrix rather than just a static list
IMAGE_NAMES = [
    "pytorch-linux-bionic-cuda10.2-cudnn7-py3.8-gcc9",
    "pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7",
    "pytorch-linux-bionic-py3.6-clang9",
    "pytorch-linux-bionic-cuda10.2-cudnn7-py3.6-clang9",
    "pytorch-linux-bionic-py3.8-gcc9",
    "pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7",
    "pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7",
    "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7",
    "pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
    "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7",
    "pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
    "pytorch-linux-xenial-py3-clang5-asan",
    "pytorch-linux-xenial-py3-clang7-onnx",
    "pytorch-linux-xenial-py3.8",
    "pytorch-linux-xenial-py3.6-clang7",
    "pytorch-linux-xenial-py3.6-gcc5.4",  # this one is used in doc builds
    "pytorch-linux-xenial-py3.6-gcc7.2",
    "pytorch-linux-xenial-py3.6-gcc7",
    "pytorch-linux-bionic-rocm3.9-py3.6",
    "pytorch-linux-bionic-rocm4.0.1-py3.6",
    "pytorch-linux-bionic-rocm4.1-py3.6",
    "pytorch-linux-bionic-rocm4.2-py3.6",
]


def get_workflow_jobs():
    """Generates a list of docker image build definitions"""
    ret = []
    for image_name in IMAGE_NAMES:
        parameters = OrderedDict({
            "name": quote(f"docker-{image_name}"),
            "image_name": quote(image_name),
        })
        if image_name == "pytorch-linux-xenial-py3.6-gcc5.4":
            # pushing documentation on tags requires CircleCI to also
            # build all the dependencies on tags, including this docker image
            parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",
                                                    tags_list=RC_PATTERN)
        ret.append(OrderedDict(
            {
                "docker_build_job": parameters
            }
        ))
    return ret
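Hand-evaluated sketch of one element of the returned list, for an image other than the doc-build one (so no `filters` key is attached; `quote` wraps its argument in double quotes):

```python
# What get_workflow_jobs() above yields for one ordinary image (a sketch).
entry = {
    "docker_build_job": {
        "name": '"docker-pytorch-linux-bionic-py3.8-gcc9"',
        "image_name": '"pytorch-linux-bionic-py3.8-gcc9"',
    }
}
```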
@@ -1,78 +0,0 @@
import cimodel.lib.miniutils as miniutils
from cimodel.data.simple.util.versions import MultiPartVersion, CudaVersion
from cimodel.data.simple.util.docker_constants import DOCKER_IMAGE_BASIC, DOCKER_IMAGE_CUDA_10_2


class GeConfigTestJob:
    def __init__(self,
                 py_version,
                 gcc_version,
                 cuda_version,
                 variant_parts,
                 extra_requires,
                 use_cuda_docker=False,
                 build_env_override=None):

        self.py_version = py_version
        self.gcc_version = gcc_version
        self.cuda_version = cuda_version
        self.variant_parts = variant_parts
        self.extra_requires = extra_requires
        self.use_cuda_docker = use_cuda_docker
        self.build_env_override = build_env_override

    def get_all_parts(self, with_dots):

        maybe_py_version = self.py_version.render_dots_or_parts(with_dots) if self.py_version else []
        maybe_gcc_version = self.gcc_version.render_dots_or_parts(with_dots) if self.gcc_version else []
        maybe_cuda_version = self.cuda_version.render_dots_or_parts(with_dots) if self.cuda_version else []

        common_parts = [
            "pytorch",
            "linux",
            "xenial",
        ] + maybe_cuda_version + maybe_py_version + maybe_gcc_version

        return common_parts + self.variant_parts

    def gen_tree(self):

        resource_class = "gpu.medium" if self.use_cuda_docker else "large"
        docker_image = DOCKER_IMAGE_CUDA_10_2 if self.use_cuda_docker else DOCKER_IMAGE_BASIC
        full_name = "_".join(self.get_all_parts(False))
        build_env = self.build_env_override or "-".join(self.get_all_parts(True))

        props_dict = {
            "name": full_name,
            "build_environment": build_env,
            "requires": self.extra_requires,
            "resource_class": resource_class,
            "docker_image": docker_image,
        }

        if self.use_cuda_docker:
            props_dict["use_cuda_docker_runtime"] = miniutils.quote(str(1))

        return [{"pytorch_linux_test": props_dict}]


WORKFLOW_DATA = [
    GeConfigTestJob(
        MultiPartVersion([3, 6], "py"),
        MultiPartVersion([5, 4], "gcc"),
        None,
        ["jit_legacy", "test"],
        ["pytorch_linux_xenial_py3_6_gcc5_4_build"]),
    GeConfigTestJob(
        None,
        None,
        CudaVersion(10, 2),
        ["cudnn7", "py3", "jit_legacy", "test"],
        ["pytorch_linux_xenial_cuda10_2_cudnn7_py3_gcc7_build"],
        use_cuda_docker=True,
    ),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
@@ -1,82 +0,0 @@
from cimodel.data.simple.util.versions import MultiPartVersion
import cimodel.lib.miniutils as miniutils

XCODE_VERSION = MultiPartVersion([12, 0, 0])


class ArchVariant:
    def __init__(self, name, custom_build_name=""):
        self.name = name
        self.custom_build_name = custom_build_name

    def render(self):
        extra_parts = [self.custom_build_name] if len(self.custom_build_name) > 0 else []
        return "_".join([self.name] + extra_parts)


def get_platform(arch_variant_name):
    return "SIMULATOR" if arch_variant_name == "x86_64" else "OS"


class IOSJob:
    def __init__(self, xcode_version, arch_variant, is_org_member_context=True, extra_props=None):
        self.xcode_version = xcode_version
        self.arch_variant = arch_variant
        self.is_org_member_context = is_org_member_context
        self.extra_props = extra_props

    def gen_name_parts(self, with_version_dots):

        version_parts = self.xcode_version.render_dots_or_parts(with_version_dots)
        build_variant_suffix = "_".join([self.arch_variant.render(), "build"])

        return [
            "pytorch",
            "ios",
        ] + version_parts + [
            build_variant_suffix,
        ]

    def gen_job_name(self):
        return "_".join(self.gen_name_parts(False))

    def gen_tree(self):

        platform_name = get_platform(self.arch_variant.name)

        props_dict = {
            "build_environment": "-".join(self.gen_name_parts(True)),
            "ios_arch": self.arch_variant.name,
            "ios_platform": platform_name,
            "name": self.gen_job_name(),
        }

        if self.is_org_member_context:
            props_dict["context"] = "org-member"

        if self.extra_props:
            props_dict.update(self.extra_props)

        return [{"pytorch_ios_build": props_dict}]


WORKFLOW_DATA = [
    IOSJob(XCODE_VERSION, ArchVariant("x86_64"), is_org_member_context=False, extra_props={
        "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(XCODE_VERSION, ArchVariant("x86_64", "full_jit"), is_org_member_context=False, extra_props={
        "lite_interpreter": miniutils.quote(str(int(False)))}),
    IOSJob(XCODE_VERSION, ArchVariant("arm64"), extra_props={
        "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={
        "use_metal": miniutils.quote(str(int(True))),
        "lite_interpreter": miniutils.quote(str(int(True)))}),
    IOSJob(XCODE_VERSION, ArchVariant("arm64", "full_jit"), extra_props={
        "lite_interpreter": miniutils.quote(str(int(False)))}),
    IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom"), extra_props={
        "op_list": "mobilenetv2.yaml",
        "lite_interpreter": miniutils.quote(str(int(True)))}),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
@ -1,52 +0,0 @@
|
||||
class MacOsJob:
|
||||
def __init__(self, os_version, is_build=False, is_test=False, extra_props=tuple()):
|
||||
# extra_props is tuple type, because mutable data structures for argument defaults
|
||||
# is not recommended.
|
||||
self.os_version = os_version
|
||||
self.is_build = is_build
|
||||
self.is_test = is_test
|
||||
self.extra_props = dict(extra_props)
|
||||
|
||||
def gen_tree(self):
|
||||
non_phase_parts = ["pytorch", "macos", self.os_version, "py3"]
|
||||
|
||||
extra_name_list = [name for name, exist in self.extra_props.items() if exist]
|
||||
full_job_name_list = non_phase_parts + extra_name_list + [
|
||||
'build' if self.is_build else None,
|
||||
'test' if self.is_test else None,
|
||||
]
|
||||
|
||||
full_job_name = "_".join(list(filter(None, full_job_name_list)))
|
||||
|
||||
test_build_dependency = "_".join(non_phase_parts + ["build"])
|
||||
extra_dependencies = [test_build_dependency] if self.is_test else []
|
||||
job_dependencies = extra_dependencies
|
||||
|
||||
# Yes we name the job after itself, it needs a non-empty value in here
|
||||
# for the YAML output to work.
|
||||
props_dict = {"requires": job_dependencies, "name": full_job_name}
|
||||
|
||||
return [{full_job_name: props_dict}]
|
||||
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
MacOsJob("10_15", is_build=True),
|
||||
MacOsJob("10_13", is_build=True),
|
||||
MacOsJob(
|
||||
"10_13",
|
||||
is_build=False,
|
||||
is_test=True,
|
||||
),
|
||||
MacOsJob(
|
||||
"10_13",
|
||||
is_build=True,
|
||||
is_test=True,
|
||||
extra_props=tuple({
|
||||
"lite_interpreter": True
|
||||
}.items()),
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
def get_workflow_jobs():
|
||||
return [item.gen_tree() for item in WORKFLOW_DATA]
|
||||
@@ -1,86 +0,0 @@
"""
PyTorch Mobile PR builds (use linux host toolchain + mobile build options)
"""

import cimodel.lib.miniutils as miniutils
import cimodel.data.simple.util.branch_filters
from cimodel.data.simple.util.docker_constants import (
    DOCKER_IMAGE_ASAN,
    DOCKER_REQUIREMENT_ASAN,
    DOCKER_IMAGE_NDK,
    DOCKER_REQUIREMENT_NDK
)


class MobileJob:
    def __init__(
            self,
            docker_image,
            docker_requires,
            variant_parts,
            is_master_only=False):
        self.docker_image = docker_image
        self.docker_requires = docker_requires
        self.variant_parts = variant_parts
        self.is_master_only = is_master_only

    def gen_tree(self):
        non_phase_parts = [
            "pytorch",
            "linux",
            "xenial",
            "py3",
            "clang5",
            "mobile",
        ] + self.variant_parts

        full_job_name = "_".join(non_phase_parts)
        build_env_name = "-".join(non_phase_parts)

        props_dict = {
            "build_environment": build_env_name,
            "build_only": miniutils.quote(str(int(True))),
            "docker_image": self.docker_image,
            "requires": self.docker_requires,
            "name": full_job_name,
        }

        if self.is_master_only:
            props_dict["filters"] = cimodel.data.simple.util.branch_filters.gen_filter_dict()

        return [{"pytorch_linux_build": props_dict}]


WORKFLOW_DATA = [
    MobileJob(
        DOCKER_IMAGE_ASAN,
        [DOCKER_REQUIREMENT_ASAN],
        ["build"]
    ),

    # Use LLVM-DEV toolchain in android-ndk-r19c docker image
    MobileJob(
        DOCKER_IMAGE_NDK,
        [DOCKER_REQUIREMENT_NDK],
        ["custom", "build", "dynamic"]
    ),

    MobileJob(
        DOCKER_IMAGE_NDK,
        [DOCKER_REQUIREMENT_NDK],
        ["custom", "build", "static"]
    ),

    # Use LLVM-DEV toolchain in android-ndk-r19c docker image
    # Most of this CI is already covered by "mobile-custom-build-dynamic" job
    MobileJob(
        DOCKER_IMAGE_NDK,
        [DOCKER_REQUIREMENT_NDK],
        ["code", "analysis"],
        True
    ),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
@@ -1,77 +0,0 @@
from cimodel.data.simple.util.docker_constants import (
    DOCKER_IMAGE_NDK,
    DOCKER_REQUIREMENT_NDK
)


class AndroidNightlyJob:
    def __init__(self,
                 variant,
                 template_name,
                 extra_props=None,
                 with_docker=True,
                 requires=None,
                 no_build_suffix=False):

        self.variant = variant
        self.template_name = template_name
        self.extra_props = extra_props or {}
        self.with_docker = with_docker
        self.requires = requires
        self.no_build_suffix = no_build_suffix

    def gen_tree(self):

        base_name_parts = [
            "pytorch",
            "linux",
            "xenial",
            "py3",
            "clang5",
            "android",
            "ndk",
            "r19c",
        ] + self.variant

        build_suffix = [] if self.no_build_suffix else ["build"]
        full_job_name = "_".join(["nightly"] + base_name_parts + build_suffix)
        build_env_name = "-".join(base_name_parts)

        props_dict = {
            "name": full_job_name,
            "requires": self.requires,
            "filters": {"branches": {"only": "nightly"}},
        }

        props_dict.update(self.extra_props)

        if self.with_docker:
            props_dict["docker_image"] = DOCKER_IMAGE_NDK
            props_dict["build_environment"] = build_env_name

        return [{self.template_name: props_dict}]


BASE_REQUIRES = [DOCKER_REQUIREMENT_NDK]

WORKFLOW_DATA = [
    AndroidNightlyJob(["x86_32"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["x86_64"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["arm", "v7a"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["arm", "v8a"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["android_gradle"], "pytorch_android_gradle_build",
                      with_docker=False,
                      requires=[
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build",
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build",
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build",
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build"]),
    AndroidNightlyJob(["x86_32_android_publish_snapshot"], "pytorch_android_publish_snapshot",
                      extra_props={"context": "org-member"},
                      with_docker=False,
                      requires=["nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_android_gradle_build"],
                      no_build_suffix=True),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
@@ -1,68 +0,0 @@
import cimodel.data.simple.ios_definitions as ios_definitions


class IOSNightlyJob:
    def __init__(self,
                 variant,
                 is_upload=False):

        self.variant = variant
        self.is_upload = is_upload

    def get_phase_name(self):
        return "upload" if self.is_upload else "build"

    def get_common_name_pieces(self, with_version_dots):

        extra_name_suffix = [self.get_phase_name()] if self.is_upload else []

        common_name_pieces = [
            "ios",
        ] + ios_definitions.XCODE_VERSION.render_dots_or_parts(with_version_dots) + [
            "nightly",
            self.variant,
            "build",
        ] + extra_name_suffix

        return common_name_pieces

    def gen_job_name(self):
        return "_".join(["pytorch"] + self.get_common_name_pieces(False))

    def gen_tree(self):
        extra_requires = [x.gen_job_name() for x in BUILD_CONFIGS] if self.is_upload else []

        props_dict = {
            "build_environment": "-".join(["libtorch"] + self.get_common_name_pieces(True)),
            "requires": extra_requires,
            "context": "org-member",
            "filters": {"branches": {"only": "nightly"}},
        }

        if not self.is_upload:
            props_dict["ios_arch"] = self.variant
            props_dict["ios_platform"] = ios_definitions.get_platform(self.variant)
            props_dict["name"] = self.gen_job_name()

        template_name = "_".join([
            "binary",
            "ios",
            self.get_phase_name(),
        ])

        return [{template_name: props_dict}]


BUILD_CONFIGS = [
    IOSNightlyJob("x86_64"),
    IOSNightlyJob("arm64"),
]


WORKFLOW_DATA = BUILD_CONFIGS + [
    IOSNightlyJob("binary", is_upload=True),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
@@ -1,27 +0,0 @@
NON_PR_BRANCH_LIST = [
    "master",
    r"/ci-all\/.*/",
    r"/release\/.*/",
]

PR_BRANCH_LIST = [
    r"/gh\/.*\/head/",
    r"/pull\/.*/",
]

RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"

def gen_filter_dict(
        branches_list=NON_PR_BRANCH_LIST,
        tags_list=None
):
    """Generates a filter dictionary for use with CircleCI's job filter"""
    filter_dict = {
        "branches": {
            "only": branches_list,
        },
    }

    if tags_list is not None:
        filter_dict["tags"] = {"only": tags_list}
    return filter_dict
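Usage sketch for `gen_filter_dict`, assuming the module above is importable under its original path; the second call is the shape used by the doc-build jobs earlier in this diff:

```python
# Usage sketch (assumes the file above is on the path under its original name).
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN

# Default: restrict a job to master / ci-all / release branches.
print(gen_filter_dict())
# {'branches': {'only': ['master', '/ci-all\\/.*/', '/release\\/.*/']}}

# Doc builds: run on every branch and also on release-candidate tags.
print(gen_filter_dict(branches_list=r"/.*/", tags_list=RC_PATTERN))
# {'branches': {'only': '/.*/'}, 'tags': {'only': '/v[0-9]+(\\.[0-9]+)*-rc[0-9]+/'}}
```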
@@ -1,33 +0,0 @@
AWS_DOCKER_HOST = "308535385114.dkr.ecr.us-east-1.amazonaws.com"

def gen_docker_image(container_type):
    return (
        "/".join([AWS_DOCKER_HOST, "pytorch", container_type]),
        f"docker-{container_type}",
    )

def gen_docker_image_requires(image_name):
    return [f"docker-{image_name}"]


DOCKER_IMAGE_BASIC, DOCKER_REQUIREMENT_BASE = gen_docker_image(
    "pytorch-linux-xenial-py3.6-gcc5.4"
)

DOCKER_IMAGE_CUDA_10_2, DOCKER_REQUIREMENT_CUDA_10_2 = gen_docker_image(
    "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
)

DOCKER_IMAGE_GCC7, DOCKER_REQUIREMENT_GCC7 = gen_docker_image(
    "pytorch-linux-xenial-py3.6-gcc7"
)


def gen_mobile_docker(specifier):
    container_type = "pytorch-linux-xenial-py3-clang5-" + specifier
    return gen_docker_image(container_type)


DOCKER_IMAGE_ASAN, DOCKER_REQUIREMENT_ASAN = gen_mobile_docker("asan")

DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK = gen_mobile_docker("android-ndk-r19c")
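For reference, the values the helpers above produce for the gcc7 image, hand-evaluated from the string joins in `gen_docker_image`:

```python
# What gen_docker_image("pytorch-linux-xenial-py3.6-gcc7") evaluates to (a sketch;
# the values follow directly from the joins above, no imports needed):
DOCKER_IMAGE_GCC7 = "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc7"
DOCKER_REQUIREMENT_GCC7 = "docker-pytorch-linux-xenial-py3.6-gcc7"
```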
@@ -1,34 +0,0 @@
class MultiPartVersion:
    def __init__(self, parts, prefix=""):
        self.parts = parts
        self.prefix = prefix

    def prefixed_parts(self):
        """
        Prepends the first element of the version list
        with the prefix string.
        """
        if self.parts:
            return [self.prefix + str(self.parts[0])] + [str(part) for part in self.parts[1:]]
        else:
            return [self.prefix]

    def render_dots(self):
        return ".".join(self.prefixed_parts())

    def render_dots_or_parts(self, with_dots):
        if with_dots:
            return [self.render_dots()]
        else:
            return self.prefixed_parts()


class CudaVersion(MultiPartVersion):
    def __init__(self, major, minor):
        self.major = major
        self.minor = minor

        super().__init__([self.major, self.minor], "cuda")

    def __str__(self):
        return f"{self.major}.{self.minor}"
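A usage sketch for the version helpers above (hand-evaluated; assumes the classes above are in scope):

```python
# The two rendering modes drive the job-name vs. build-environment naming split
# seen throughout these definitions.
v = CudaVersion(10, 2)
assert str(v) == "10.2"
assert v.render_dots() == "cuda10.2"
assert v.render_dots_or_parts(False) == ["cuda10", "2"]   # underscore-joined job names
assert v.render_dots_or_parts(True) == ["cuda10.2"]       # dash-joined build environments
```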
@@ -1,159 +0,0 @@
import cimodel.lib.miniutils as miniutils
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN, NON_PR_BRANCH_LIST
from cimodel.data.simple.util.versions import CudaVersion


class WindowsJob:
    def __init__(
        self,
        test_index,
        vscode_spec,
        cuda_version,
        force_on_cpu=False,
        multi_gpu=False,
        master_only=False,
        nightly_only=False,
        master_and_nightly=False
    ):
        self.test_index = test_index
        self.vscode_spec = vscode_spec
        self.cuda_version = cuda_version
        self.force_on_cpu = force_on_cpu
        self.multi_gpu = multi_gpu
        self.master_only = master_only
        self.nightly_only = nightly_only
        self.master_and_nightly = master_and_nightly

    def gen_tree(self):

        base_phase = "build" if self.test_index is None else "test"
        numbered_phase = (
            base_phase if self.test_index is None else base_phase + str(self.test_index)
        )

        key_parts = ["pytorch", "windows", base_phase]
        if self.multi_gpu:
            key_parts.append('multigpu')
        key_name = "_".join(key_parts)

        cpu_forcing_name_parts = ["on", "cpu"] if self.force_on_cpu else []

        target_arch = self.cuda_version.render_dots() if self.cuda_version else "cpu"

        python_version = "3.8"

        base_name_parts = [
            "pytorch",
            "windows",
            self.vscode_spec.render(),
            "py" + python_version.replace(".", ""),
            target_arch,
        ]

        prerequisite_jobs = []
        if base_phase == "test":
            prerequisite_jobs.append("_".join(base_name_parts + ["build"]))

        if self.cuda_version:
            self.cudnn_version = 8 if self.cuda_version.major == 11 else 7

        arch_env_elements = (
            ["cuda" + str(self.cuda_version.major), "cudnn" + str(self.cudnn_version)]
            if self.cuda_version
            else ["cpu"]
        )

        build_environment_string = "-".join(
            ["pytorch", "win"]
            + self.vscode_spec.get_elements()
            + arch_env_elements
            + ["py" + python_version.split(".")[0]]
        )

        is_running_on_cuda = bool(self.cuda_version) and not self.force_on_cpu

        if self.multi_gpu:
            props_dict = {"requires": prerequisite_jobs}
        else:
            props_dict = {
                "build_environment": build_environment_string,
                "python_version": miniutils.quote(python_version),
                "vc_version": miniutils.quote(self.vscode_spec.dotted_version()),
                "vc_year": miniutils.quote(str(self.vscode_spec.year)),
                "vc_product": self.vscode_spec.get_product(),
                "use_cuda": miniutils.quote(str(int(is_running_on_cuda))),
                "requires": prerequisite_jobs,
            }

        if self.master_only:
            props_dict[
                "filters"
            ] = gen_filter_dict()
        elif self.nightly_only:
            props_dict[
                "filters"
            ] = gen_filter_dict(branches_list=["nightly"], tags_list=RC_PATTERN)
        elif self.master_and_nightly:
            props_dict[
                "filters"
            ] = gen_filter_dict(branches_list=NON_PR_BRANCH_LIST + ["nightly"], tags_list=RC_PATTERN)

        name_parts = base_name_parts + cpu_forcing_name_parts + [numbered_phase]

        if not self.multi_gpu:
            if base_phase == "test":
                test_name = "-".join(["pytorch", "windows", numbered_phase])
                props_dict["test_name"] = test_name

                if is_running_on_cuda:
                    props_dict["executor"] = "windows-with-nvidia-gpu"

            props_dict["cuda_version"] = (
                miniutils.quote(str(self.cuda_version))
                if self.cuda_version
                else "cpu"
            )

        props_dict["name"] = "_".join(name_parts)

        return [{key_name: props_dict}]


class VcSpec:
    def __init__(self, year, version_elements=None, hide_version=False):
        self.year = year
        self.version_elements = version_elements or []
        self.hide_version = hide_version

    def get_elements(self):
        if self.hide_version:
            return [self.prefixed_year()]
        return [self.prefixed_year()] + self.version_elements

    def get_product(self):
        return "BuildTools"

    def dotted_version(self):
        return ".".join(self.version_elements)

    def prefixed_year(self):
        return "vs" + str(self.year)

    def render(self):
        return "_".join(self.get_elements())

_VC2019 = VcSpec(2019)

WORKFLOW_DATA = [
    # VS2019 CUDA-10.1
    WindowsJob(None, _VC2019, CudaVersion(10, 1), master_only=True),
    # VS2019 CUDA-10.1 force on cpu
    WindowsJob(1, _VC2019, CudaVersion(10, 1), force_on_cpu=True, master_only=True),

    # TODO: This test is disabled due to https://github.com/pytorch/pytorch/issues/59724
    # WindowsJob('_azure_multi_gpu', _VC2019, CudaVersion(11, 1), multi_gpu=True, master_and_nightly=True),
]


def get_windows_workflows():
    return [item.gen_tree() for item in WORKFLOW_DATA]
@@ -1,3 +1,6 @@
#!/usr/bin/env python3


from dataclasses import dataclass, field
from typing import Optional, Dict

@@ -1,3 +1,6 @@
#!/usr/bin/env python3


def quote(s):
    return sandwich('"', s)

@@ -1,6 +1,7 @@
from collections import OrderedDict
#!/usr/bin/env python3

import cimodel.lib.miniutils as miniutils

from collections import OrderedDict


LIST_MARKER = "- "
@@ -31,8 +32,7 @@ def render(fh, data, depth, is_list_member=False):
        tuples.sort()

        for i, (k, v) in enumerate(tuples):
            if not v:
                continue

            # If this dict is itself a list member, the first key gets prefixed with a list marker
            list_marker_prefix = LIST_MARKER if is_list_member and not i else ""

@@ -46,7 +46,5 @@ def render(fh, data, depth, is_list_member=False):
            render(fh, v, depth, True)

    else:
        # use empty quotes to denote an empty string value instead of blank space
        modified_data = miniutils.quote(data) if data == "" else data
        list_member_prefix = indentation + LIST_MARKER if is_list_member else ""
        fh.write(list_member_prefix + str(modified_data) + "\n")
        fh.write(list_member_prefix + str(data) + "\n")
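Usage sketch for `render()` from the miniyaml fragment above (assumes the module is importable; the exact output shape is approximate):

```python
# Serialize a small nested structure to stdout as indented YAML-like text.
import sys

data = {"docker_build_job": {"name": "docker-pytorch-linux-bionic-py3.8-gcc9"}}
render(sys.stdout, data, 0)
# Prints, approximately:
# docker_build_job:
#   name: docker-pytorch-linux-bionic-py3.8-gcc9
```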
.circleci/cimodel/lib/visualization.py (new file, 86 lines)
@@ -0,0 +1,86 @@
#!/usr/bin/env python3

"""
This module encapsulates dependencies on pygraphviz
"""

import colorsys

import cimodel.lib.conf_tree as conf_tree


def rgb2hex(rgb_tuple):
    def to_hex(f):
        return "%02x" % int(f * 255)

    return "#" + "".join(map(to_hex, list(rgb_tuple)))


def handle_missing_graphviz(f):
    """
    If the user has not installed pygraphviz, this causes
    calls to the draw() method of the returned object to do nothing.
    """
    try:
        import pygraphviz  # noqa: F401
        return f

    except ModuleNotFoundError:

        class FakeGraph:
            def draw(self, *args, **kwargs):
                pass

        return lambda _: FakeGraph()


@handle_missing_graphviz
def generate_graph(toplevel_config_node):
    """
    Traverses the graph once first just to find the max depth
    """

    config_list = conf_tree.dfs(toplevel_config_node)

    max_depth = 0
    for config in config_list:
        max_depth = max(max_depth, config.get_depth())

    # color the nodes using the max depth

    from pygraphviz import AGraph
    dot = AGraph()

    def node_discovery_callback(node, sibling_index, sibling_count):
        depth = node.get_depth()

        sat_min, sat_max = 0.1, 0.6
        sat_range = sat_max - sat_min

        saturation_fraction = sibling_index / float(sibling_count - 1) if sibling_count > 1 else 1
        saturation = sat_min + sat_range * saturation_fraction

        # TODO Use a hash of the node label to determine the color
        hue = depth / float(max_depth + 1)

        rgb_tuple = colorsys.hsv_to_rgb(hue, saturation, 1)

        this_node_key = node.get_node_key()

        dot.add_node(
            this_node_key,
            label=node.get_label(),
            style="filled",
            # fillcolor=hex_color + ":orange",
            fillcolor=rgb2hex(rgb_tuple),
            penwidth=3,
            color=rgb2hex(colorsys.hsv_to_rgb(hue, saturation, 0.9))
        )

    def child_callback(node, child):
        this_node_key = node.get_node_key()
        child_node_key = child.get_node_key()
        dot.add_edge((this_node_key, child_node_key))

    conf_tree.dfs_recurse(toplevel_config_node, lambda x: None, node_discovery_callback, child_callback)
    return dot
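Usage sketch for `generate_graph` (assumes pygraphviz is installed; `root` is a hypothetical config-tree root node such as the one a `get_root()` helper returns elsewhere in cimodel, not a name defined in this file):

```python
# Render the configuration tree to an image file; draw() is a no-op if
# pygraphviz is missing, thanks to the decorator above.
#
#   dot = generate_graph(root)
#   dot.draw("config_tree.png", prog="dot")
```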
@@ -1,17 +0,0 @@
#!/bin/bash -xe


YAML_FILENAME=verbatim-sources/workflows-pytorch-ge-config-tests.yml
DIFF_TOOL=meld


# Allows this script to be invoked from any directory:
cd $(dirname "$0")

pushd ..


$DIFF_TOOL $YAML_FILENAME <(./codegen_validation/normalize_yaml_fragment.py < $YAML_FILENAME)


popd
@@ -1,24 +0,0 @@
#!/usr/bin/env python3

import os
import sys
import yaml

# Need to import modules that lie on an upward-relative path
sys.path.append(os.path.join(sys.path[0], '..'))

import cimodel.lib.miniyaml as miniyaml


def regurgitate(depth, use_pyyaml_formatter=False):
    data = yaml.safe_load(sys.stdin)

    if use_pyyaml_formatter:
        output = yaml.dump(data, sort_keys=True)
        sys.stdout.write(output)
    else:
        miniyaml.render(sys.stdout, data, depth)


if __name__ == "__main__":
    regurgitate(3)
@@ -1,15 +0,0 @@
#!/bin/bash -xe

YAML_FILENAME=$1

# Allows this script to be invoked from any directory:
cd $(dirname "$0")

pushd ..

TEMP_FILENAME=$(mktemp)

cat $YAML_FILENAME | ./codegen_validation/normalize_yaml_fragment.py > $TEMP_FILENAME
mv $TEMP_FILENAME $YAML_FILENAME

popd
.circleci/config.yml (12255 lines changed): diff suppressed because it is too large.
@@ -1,31 +0,0 @@
# Docker images for Jenkins

This directory contains everything needed to build the Docker images
that are used in our CI.

The Dockerfiles located in subdirectories are parameterized to
conditionally run build stages depending on build arguments passed to
`docker build`. This lets us use only a few Dockerfiles for many
images. The different configurations are identified by a freeform
string that we call a _build environment_. This string is persisted in
each image as the `BUILD_ENVIRONMENT` environment variable.

See `build.sh` for valid build environments (it's the giant switch).

Docker builds are now defined with `.circleci/cimodel/data/simple/docker_definitions.py`.

## Contents

* `build.sh` -- dispatch script to launch all builds
* `common` -- scripts used to execute individual Docker build stages
* `ubuntu-cuda` -- Dockerfile for Ubuntu image with CUDA support for nvidia-docker

## Usage

```bash
# Build a specific image
./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest

# Set flags (see build.sh) and build image
sudo bash -c 'BREAKPAD=1 ./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest'
```
@@ -1 +0,0 @@
<manifest package="org.pytorch.deps" />
@@ -1,66 +0,0 @@
buildscript {
    ext {
        minSdkVersion = 21
        targetSdkVersion = 28
        compileSdkVersion = 28
        buildToolsVersion = '28.0.3'

        coreVersion = "1.2.0"
        extJUnitVersion = "1.1.1"
        runnerVersion = "1.2.0"
        rulesVersion = "1.2.0"
        junitVersion = "4.12"
    }

    repositories {
        google()
        mavenLocal()
        mavenCentral()
        jcenter()
    }

    dependencies {
        classpath 'com.android.tools.build:gradle:4.1.2'
        classpath 'com.vanniktech:gradle-maven-publish-plugin:0.14.2'
    }
}

repositories {
    google()
    jcenter()
}

apply plugin: 'com.android.library'

android {
    compileSdkVersion rootProject.compileSdkVersion
    buildToolsVersion rootProject.buildToolsVersion

    defaultConfig {
        minSdkVersion minSdkVersion
        targetSdkVersion targetSdkVersion
    }

    sourceSets {
        main {
            manifest.srcFile 'AndroidManifest.xml'
        }
    }
}

dependencies {
    implementation 'com.android.support:appcompat-v7:28.0.0'
    implementation 'androidx.appcompat:appcompat:1.0.0'
    implementation 'com.facebook.fbjni:fbjni-java-only:0.0.3'
    implementation 'com.google.code.findbugs:jsr305:3.0.1'
    implementation 'com.facebook.soloader:nativeloader:0.8.0'

    implementation 'junit:junit:' + rootProject.junitVersion
    implementation 'androidx.test:core:' + rootProject.coreVersion

    implementation 'junit:junit:' + rootProject.junitVersion
    implementation 'androidx.test:core:' + rootProject.coreVersion
    implementation 'androidx.test.ext:junit:' + rootProject.extJUnitVersion
    implementation 'androidx.test:rules:' + rootProject.rulesVersion
    implementation 'androidx.test:runner:' + rootProject.runnerVersion
}
@@ -1,421 +0,0 @@
#!/bin/bash

set -ex

image="$1"
shift

if [ -z "${image}" ]; then
  echo "Usage: $0 IMAGE"
  exit 1
fi

function extract_version_from_image_name() {
  eval export $2=$(echo "${image}" | perl -n -e"/$1(\d+(\.\d+)?(\.\d+)?)/ && print \$1")
  if [ "x${!2}" = x ]; then
    echo "variable '$2' not correctly parsed from image='$image'"
    exit 1
  fi
}

function extract_all_from_image_name() {
  # parts $image into array, splitting on '-'
  keep_IFS="$IFS"
  IFS="-"
  declare -a parts=($image)
  IFS="$keep_IFS"
  unset keep_IFS

  for part in "${parts[@]}"; do
    name=$(echo "${part}" | perl -n -e"/([a-zA-Z]+)\d+(\.\d+)?(\.\d+)?/ && print \$1")
    vername="${name^^}_VERSION"
    # "py" is the odd one out, needs this special case
    if [ "x${name}" = xpy ]; then
      vername=ANACONDA_PYTHON_VERSION
    fi
    # skip non-conforming fields such as "pytorch", "linux" or "xenial" without version string
    if [ -n "${name}" ]; then
      extract_version_from_image_name "${name}" "${vername}"
    fi
  done
}
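The Perl one-liner in `extract_version_from_image_name` is the core of the script; a Python transliteration of the same regex, for illustration only (not part of build.sh):

```python
# Pull a dotted version number out of an image name, matching the pattern
# used by the Perl snippet above.
import re

def extract_version(image, prefix):
    m = re.search(rf"{prefix}(\d+(\.\d+)?(\.\d+)?)", image)
    return m.group(1) if m else None

print(extract_version("pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7", "cuda"))  # 10.2
```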
|
||||
if [[ "$image" == *-xenial* ]]; then
|
||||
UBUNTU_VERSION=16.04
|
||||
elif [[ "$image" == *-artful* ]]; then
|
||||
UBUNTU_VERSION=17.10
|
||||
elif [[ "$image" == *-bionic* ]]; then
|
||||
UBUNTU_VERSION=18.04
|
||||
elif [[ "$image" == *-focal* ]]; then
|
||||
UBUNTU_VERSION=20.04
|
||||
elif [[ "$image" == *ubuntu* ]]; then
|
||||
extract_version_from_image_name ubuntu UBUNTU_VERSION
|
||||
elif [[ "$image" == *centos* ]]; then
|
||||
extract_version_from_image_name centos CENTOS_VERSION
|
||||
fi
|
||||
|
||||
if [ -n "${UBUNTU_VERSION}" ]; then
|
||||
OS="ubuntu"
|
||||
elif [ -n "${CENTOS_VERSION}" ]; then
|
||||
OS="centos"
|
||||
else
|
||||
echo "Unable to derive operating system base..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DOCKERFILE="${OS}/Dockerfile"
|
||||
if [[ "$image" == *cuda* ]]; then
|
||||
DOCKERFILE="${OS}-cuda/Dockerfile"
|
||||
elif [[ "$image" == *rocm* ]]; then
|
||||
DOCKERFILE="${OS}-rocm/Dockerfile"
|
||||
fi
|
||||
|
||||
TRAVIS_DL_URL_PREFIX="https://s3.amazonaws.com/travis-python-archives/binaries/ubuntu/14.04/x86_64"
|
||||
|
||||
# It's annoying to rename jobs every time you want to rewrite a
|
||||
# configuration, so we hardcode everything here rather than do it
|
||||
# from scratch
|
||||
case "$image" in
|
||||
pytorch-linux-xenial-py3.8)
|
||||
ANACONDA_PYTHON_VERSION=3.8
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py3.6-gcc5.4)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=5
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3.6-gcc7.2)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py3.6-gcc7)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7)
|
||||
CUDA_VERSION=10.0
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7)
|
||||
CUDA_VERSION=10.1
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7)
|
||||
CUDA_VERSION=10.2
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7)
|
||||
CUDA_VERSION=11.1
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7)
|
||||
CUDA_VERSION=11.3.0 # Deviating from major.minor to conform to nvidia's Docker image names
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang5-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=5.0
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang7-onnx)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang5-android-ndk-r19c)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=5.0
|
||||
LLVMDEV=yes
|
||||
PROTOBUF=yes
|
||||
ANDROID=yes
|
||||
ANDROID_NDK_VERSION=r19c
|
||||
GRADLE_VERSION=6.8.3
|
||||
CMAKE_VERSION=3.7.0
|
||||
NINJA_VERSION=1.9.0
|
||||
;;
|
||||
pytorch-linux-xenial-py3.6-clang7)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-bionic-py3.6-clang9)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
VULKAN_SDK_VERSION=1.2.162.1
|
||||
SWIFTSHADER=yes
|
||||
;;
|
||||
pytorch-linux-bionic-py3.8-gcc9)
|
||||
ANACONDA_PYTHON_VERSION=3.8
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-bionic-cuda10.2-cudnn7-py3.6-clang9)
|
||||
CUDA_VERSION=10.2
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-bionic-cuda10.2-cudnn7-py3.8-gcc9)
|
||||
CUDA_VERSION=10.2
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.8
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7)
|
||||
CUDA_VERSION=10.2
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.9
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
;;
|
||||
pytorch-linux-bionic-cuda11.0-cudnn8-py3.6-gcc9)
|
||||
CUDA_VERSION=11.0
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
ROCM_VERSION=3.9
|
||||
;;
|
||||
pytorch-linux-bionic-rocm4.0.1-py3.6)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
ROCM_VERSION=4.0.1
|
||||
;;
|
||||
pytorch-linux-bionic-rocm4.1-py3.6)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
ROCM_VERSION=4.1
|
||||
;;
|
||||
pytorch-linux-bionic-rocm4.2-py3.6)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
ROCM_VERSION=4.2
|
||||
;;
|
||||
*)
|
||||
# Catch-all for builds that are not hardcoded.
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
BREAKPAD=yes
|
||||
echo "image '$image' did not match an existing build configuration"
|
||||
if [[ "$image" == *py* ]]; then
|
||||
extract_version_from_image_name py ANACONDA_PYTHON_VERSION
|
||||
fi
|
||||
if [[ "$image" == *cuda* ]]; then
|
||||
extract_version_from_image_name cuda CUDA_VERSION
|
||||
extract_version_from_image_name cudnn CUDNN_VERSION
|
||||
fi
|
||||
if [[ "$image" == *rocm* ]]; then
|
||||
extract_version_from_image_name rocm ROCM_VERSION
|
||||
fi
|
||||
if [[ "$image" == *gcc* ]]; then
|
||||
extract_version_from_image_name gcc GCC_VERSION
|
||||
fi
|
||||
if [[ "$image" == *clang* ]]; then
|
||||
extract_version_from_image_name clang CLANG_VERSION
|
||||
fi
|
||||
if [[ "$image" == *devtoolset* ]]; then
|
||||
extract_version_from_image_name devtoolset DEVTOOLSET_VERSION
|
||||
fi
|
||||
if [[ "$image" == *glibc* ]]; then
|
||||
extract_version_from_image_name glibc GLIBC_VERSION
|
||||
fi
|
||||
if [[ "$image" == *cmake* ]]; then
|
||||
extract_version_from_image_name cmake CMAKE_VERSION
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
# Set Jenkins UID and GID if running Jenkins
|
||||
if [ -n "${JENKINS:-}" ]; then
|
||||
JENKINS_UID=$(id -u jenkins)
|
||||
JENKINS_GID=$(id -g jenkins)
|
||||
fi
|
||||
|
||||
tmp_tag="tmp-$(cat /dev/urandom | tr -dc 'a-z' | head -c 32)"
|
||||
|
||||
# Build image
|
||||
# TODO: build-arg THRIFT is not turned on for any image, remove it once we confirm
|
||||
# it's no longer needed.
|
||||
docker build \
|
||||
--no-cache \
|
||||
--progress=plain \
|
||||
--build-arg "TRAVIS_DL_URL_PREFIX=${TRAVIS_DL_URL_PREFIX}" \
|
||||
--build-arg "BUILD_ENVIRONMENT=${image}" \
|
||||
--build-arg "PROTOBUF=${PROTOBUF:-}" \
|
||||
--build-arg "THRIFT=${THRIFT:-}" \
|
||||
--build-arg "LLVMDEV=${LLVMDEV:-}" \
|
||||
--build-arg "DB=${DB:-}" \
|
||||
--build-arg "VISION=${VISION:-}" \
|
||||
--build-arg "EC2=${EC2:-}" \
|
||||
--build-arg "JENKINS=${JENKINS:-}" \
|
||||
--build-arg "JENKINS_UID=${JENKINS_UID:-}" \
|
||||
--build-arg "JENKINS_GID=${JENKINS_GID:-}" \
|
||||
--build-arg "UBUNTU_VERSION=${UBUNTU_VERSION}" \
|
||||
--build-arg "CENTOS_VERSION=${CENTOS_VERSION}" \
|
||||
--build-arg "DEVTOOLSET_VERSION=${DEVTOOLSET_VERSION}" \
|
||||
--build-arg "GLIBC_VERSION=${GLIBC_VERSION}" \
|
||||
--build-arg "CLANG_VERSION=${CLANG_VERSION}" \
|
||||
--build-arg "ANACONDA_PYTHON_VERSION=${ANACONDA_PYTHON_VERSION}" \
|
||||
--build-arg "GCC_VERSION=${GCC_VERSION}" \
|
||||
--build-arg "CUDA_VERSION=${CUDA_VERSION}" \
|
||||
--build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \
|
||||
--build-arg "BREAKPAD=${BREAKPAD}" \
|
||||
--build-arg "ANDROID=${ANDROID}" \
|
||||
--build-arg "ANDROID_NDK=${ANDROID_NDK_VERSION}" \
|
||||
--build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \
|
||||
--build-arg "VULKAN_SDK_VERSION=${VULKAN_SDK_VERSION}" \
|
||||
--build-arg "SWIFTSHADER=${SWIFTSHADER}" \
|
||||
--build-arg "CMAKE_VERSION=${CMAKE_VERSION:-}" \
|
||||
--build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
|
||||
--build-arg "KATEX=${KATEX:-}" \
|
||||
--build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
|
||||
-f $(dirname ${DOCKERFILE})/Dockerfile \
|
||||
-t "$tmp_tag" \
|
||||
"$@" \
|
||||
.
|
||||
|
||||
# NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
|
||||
# for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
|
||||
# find the correct image. As a result, here we have to replace the
|
||||
# "$UBUNTU_VERSION" == "18.04-rc"
|
||||
# with
|
||||
# "$UBUNTU_VERSION" == "18.04"
|
||||
UBUNTU_VERSION=$(echo ${UBUNTU_VERSION} | sed 's/-rc$//')
|
||||
|
||||
function drun() {
|
||||
docker run --rm "$tmp_tag" $*
|
||||
}
|
||||
|
||||
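# Example (hypothetical): `drun lsb_release -a` starts a throwaway container
# from the image that was just built and runs the probe inside it, so the
# checks below validate the image contents rather than the host.
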
if [[ "$OS" == "ubuntu" ]]; then
|
||||
if !(drun lsb_release -a 2>&1 | grep -qF Ubuntu); then
|
||||
echo "OS=ubuntu, but:"
|
||||
drun lsb_release -a
|
||||
exit 1
|
||||
fi
|
||||
if !(drun lsb_release -a 2>&1 | grep -qF "$UBUNTU_VERSION"); then
|
||||
echo "UBUNTU_VERSION=$UBUNTU_VERSION, but:"
|
||||
drun lsb_release -a
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
if !(drun python --version 2>&1 | grep -qF "Python $ANACONDA_PYTHON_VERSION"); then
|
||||
echo "ANACONDA_PYTHON_VERSION=$ANACONDA_PYTHON_VERSION, but:"
|
||||
drun python --version
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$GCC_VERSION" ]; then
|
||||
if !(drun gcc --version 2>&1 | grep -q " $GCC_VERSION\\W"); then
|
||||
echo "GCC_VERSION=$GCC_VERSION, but:"
|
||||
drun gcc --version
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$CLANG_VERSION" ]; then
|
||||
if !(drun clang --version 2>&1 | grep -qF "clang version $CLANG_VERSION"); then
|
||||
echo "CLANG_VERSION=$CLANG_VERSION, but:"
|
||||
drun clang --version
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$KATEX" ]; then
|
||||
if !(drun katex --version); then
|
||||
echo "KATEX=$KATEX, but:"
|
||||
drun katex --version
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
@@ -1,52 +0,0 @@
#!/bin/bash

set -ex

retry () {
  $* || (sleep 1 && $*) || (sleep 2 && $*)
}

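# Example (hypothetical): `retry docker pull "${image}:${tag}"` makes up to
# three attempts, sleeping 1s and then 2s between them, and fails only if
# all three attempts fail.
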
# If UPSTREAM_BUILD_ID is set (see trigger job), then we can
# use it to tag this build with the same ID used to tag all other
# base image builds. Also, we can try and pull the previous
# image first, to avoid rebuilding layers that haven't changed.

# Until we find a way to reliably reuse a previous build, this last_tag is not in use.
# last_tag="$(( CIRCLE_BUILD_NUM - 1 ))"
tag="${DOCKER_TAG}"


registry="308535385114.dkr.ecr.us-east-1.amazonaws.com"
image="${registry}/pytorch/${IMAGE_NAME}"

login() {
  aws ecr get-authorization-token --region us-east-1 --output text --query 'authorizationData[].authorizationToken' |
    base64 -d |
    cut -d: -f2 |
    docker login -u AWS --password-stdin "$1"
}

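# Flow note (a sketch): `aws ecr get-authorization-token` returns a base64
# token that decodes to "AWS:<password>"; the pipeline above decodes it,
# keeps the password with `cut -d: -f2`, and feeds it to
# `docker login -u AWS --password-stdin` for the target registry.
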
# Retry on timeouts (can happen on job stampede).
retry login "${registry}"

# Logout on exit
trap "docker logout ${registry}" EXIT

# export EC2=1
# export JENKINS=1

# Try to pull the previous image (perhaps we can reuse some layers)
# if [ -n "${last_tag}" ]; then
#   docker pull "${image}:${last_tag}" || true
# fi

# Build new image
./build.sh ${IMAGE_NAME} -t "${image}:${tag}"

docker push "${image}:${tag}"

docker save -o "${IMAGE_NAME}:${tag}.tar" "${image}:${tag}"

if [ -z "${DOCKER_SKIP_S3_UPLOAD:-}" ]; then
  aws s3 cp "${IMAGE_NAME}:${tag}.tar" "s3://ossci-linux-build/pytorch/base/${IMAGE_NAME}:${tag}.tar" --acl public-read
fi
@@ -1,93 +0,0 @@
ARG CENTOS_VERSION

FROM centos:${CENTOS_VERSION}

ARG CENTOS_VERSION

# Install required packages to build Caffe2

# Install common dependencies (so that this step can be cached separately)
ARG EC2
ADD ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install devtoolset
ARG DEVTOOLSET_VERSION
ADD ./common/install_devtoolset.sh install_devtoolset.sh
RUN bash ./install_devtoolset.sh && rm install_devtoolset.sh
ENV BASH_ENV "/etc/profile"

# (optional) Install non-default glibc version
ARG GLIBC_VERSION
ADD ./common/install_glibc.sh install_glibc.sh
RUN if [ -n "${GLIBC_VERSION}" ]; then bash ./install_glibc.sh; fi
RUN rm install_glibc.sh

# Install user
ADD ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install conda and other packages (e.g., numpy, coverage, pytest)
ENV PATH /opt/conda/bin:$PATH
ARG ANACONDA_PYTHON_VERSION
ADD ./common/install_conda.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh

# (optional) Install protobuf for ONNX
ARG PROTOBUF
ADD ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}

# (optional) Install database packages like LMDB and LevelDB
ARG DB
ADD ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
ADD ./common/install_vision.sh install_vision.sh
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh
ENV INSTALLED_VISION ${VISION}

# Install rocm
ARG ROCM_VERSION
ADD ./common/install_rocm.sh install_rocm.sh
RUN bash ./install_rocm.sh
RUN rm install_rocm.sh
ENV PATH /opt/rocm/bin:$PATH
ENV PATH /opt/rocm/hcc/bin:$PATH
ENV PATH /opt/rocm/hip/bin:$PATH
ENV PATH /opt/rocm/opencl/bin:$PATH
ENV PATH /opt/rocm/llvm/bin:$PATH
ENV MAGMA_HOME /opt/rocm/magma
ENV LANG en_US.utf8
ENV LC_ALL en_US.utf8

# (optional) Install non-default CMake version
ARG CMAKE_VERSION
ADD ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh

# (optional) Install non-default Ninja version
ARG NINJA_VERSION
ADD ./common/install_ninja.sh install_ninja.sh
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
RUN rm install_ninja.sh

# Install ccache/sccache (do this last, so we get priority in PATH)
ADD ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
RUN bash ./install_cache.sh && rm install_cache.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

USER jenkins
CMD ["bash"]
@@ -1,109 +0,0 @@
#!/bin/bash

set -ex

[ -n "${ANDROID_NDK}" ]

_https_amazon_aws=https://ossci-android.s3.amazonaws.com

apt-get update
apt-get install -y --no-install-recommends autotools-dev autoconf unzip
apt-get autoclean && apt-get clean
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

pushd /tmp
curl -Os --retry 3 $_https_amazon_aws/android-ndk-${ANDROID_NDK}-linux-x86_64.zip
popd
_ndk_dir=/opt/ndk
mkdir -p "$_ndk_dir"
unzip -qo /tmp/android*.zip -d "$_ndk_dir"
_versioned_dir=$(find "$_ndk_dir/" -mindepth 1 -maxdepth 1 -type d)
mv "$_versioned_dir"/* "$_ndk_dir"/
rmdir "$_versioned_dir"
rm -rf /tmp/*

# Install OpenJDK
# https://hub.docker.com/r/picoded/ubuntu-openjdk-8-jdk/dockerfile/

sudo apt-get update && \
    apt-get install -y openjdk-8-jdk && \
    apt-get install -y ant && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/* && \
    rm -rf /var/cache/oracle-jdk8-installer;

# Fix certificate issues, found as of
# https://bugs.launchpad.net/ubuntu/+source/ca-certificates-java/+bug/983302

sudo apt-get update && \
    apt-get install -y ca-certificates-java && \
    apt-get clean && \
    update-ca-certificates -f && \
    rm -rf /var/lib/apt/lists/* && \
    rm -rf /var/cache/oracle-jdk8-installer;

export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/

# Installing android sdk
# https://github.com/circleci/circleci-images/blob/staging/android/Dockerfile.m4

_tmp_sdk_zip=/tmp/android-sdk-linux.zip
_android_home=/opt/android/sdk

rm -rf $_android_home
sudo mkdir -p $_android_home
curl --silent --show-error --location --fail --retry 3 --output /tmp/android-sdk-linux.zip $_https_amazon_aws/android-sdk-linux-tools3859397-build-tools2803-2902-platforms28-29.zip
sudo unzip -q $_tmp_sdk_zip -d $_android_home
rm $_tmp_sdk_zip

sudo chmod -R 777 $_android_home

export ANDROID_HOME=$_android_home
export ADB_INSTALL_TIMEOUT=120

export PATH="${ANDROID_HOME}/tools:${ANDROID_HOME}/tools/bin:${ANDROID_HOME}/platform-tools:${PATH}"
echo "PATH:${PATH}"

# Installing Gradle
echo "GRADLE_VERSION:${GRADLE_VERSION}"
_gradle_home=/opt/gradle
sudo rm -rf $_gradle_home
sudo mkdir -p $_gradle_home

curl --silent --output /tmp/gradle.zip --retry 3 $_https_amazon_aws/gradle-${GRADLE_VERSION}-bin.zip

sudo unzip -q /tmp/gradle.zip -d $_gradle_home
rm /tmp/gradle.zip

sudo chmod -R 777 $_gradle_home

export GRADLE_HOME=$_gradle_home/gradle-$GRADLE_VERSION
alias gradle="${GRADLE_HOME}/bin/gradle"

export PATH="${GRADLE_HOME}/bin/:${PATH}"
echo "PATH:${PATH}"

gradle --version

mkdir /var/lib/jenkins/gradledeps
cp build.gradle /var/lib/jenkins/gradledeps
cp AndroidManifest.xml /var/lib/jenkins/gradledeps

pushd /var/lib/jenkins

export GRADLE_LOCAL_PROPERTIES=gradledeps/local.properties
rm -f $GRADLE_LOCAL_PROPERTIES
echo "sdk.dir=/opt/android/sdk" >> $GRADLE_LOCAL_PROPERTIES
echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES

chown -R jenkins /var/lib/jenkins/gradledeps
chgrp -R jenkins /var/lib/jenkins/gradledeps

sudo -H -u jenkins $GRADLE_HOME/bin/gradle -Pandroid.useAndroidX=true -p /var/lib/jenkins/gradledeps -g /var/lib/jenkins/.gradle --refresh-dependencies --debug --stacktrace assemble

chown -R jenkins /var/lib/jenkins/.gradle
chgrp -R jenkins /var/lib/jenkins/.gradle

popd

rm -rf /var/lib/jenkins/.gradle/daemon
@@ -1,123 +0,0 @@
#!/bin/bash

set -ex

install_ubuntu() {
  # NVIDIA dockers for RC releases use tag names like `11.0-cudnn8-devel-ubuntu18.04-rc`,
  # for this case we will set UBUNTU_VERSION to `18.04-rc` so that the Dockerfile could
  # find the correct image. As a result, here we have to check for
  #   "$UBUNTU_VERSION" == "18.04"*
  # instead of
  #   "$UBUNTU_VERSION" == "18.04"
  if [[ "$UBUNTU_VERSION" == "18.04"* ]]; then
    cmake3="cmake=3.10*"
  else
    cmake3="cmake=3.5*"
  fi

  # Install common dependencies
  apt-get update
  # TODO: Some of these may not be necessary
  ccache_deps="asciidoc docbook-xml docbook-xsl xsltproc"
  numpy_deps="gfortran"
  apt-get install -y --no-install-recommends \
    $ccache_deps \
    $numpy_deps \
    ${cmake3} \
    apt-transport-https \
    autoconf \
    automake \
    build-essential \
    ca-certificates \
    curl \
    git \
    libatlas-base-dev \
    libc6-dbg \
    libiomp-dev \
    libyaml-dev \
    libz-dev \
    libjpeg-dev \
    libasound2-dev \
    libsndfile-dev \
    software-properties-common \
    sudo \
    wget \
    vim

  # Cleanup package manager
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {
  # Need EPEL for many packages we depend on.
  # See http://fedoraproject.org/wiki/EPEL
  yum --enablerepo=extras install -y epel-release

  ccache_deps="asciidoc docbook-dtds docbook-style-xsl libxslt"
  numpy_deps="gcc-gfortran"
  # Note: protobuf-c-{compiler,devel} on CentOS are too old to be used
  # for Caffe2. That said, we still install them to make sure the build
  # system opts to build/use protoc and libprotobuf from third-party.
  yum install -y \
    $ccache_deps \
    $numpy_deps \
    autoconf \
    automake \
    bzip2 \
    cmake \
    cmake3 \
    curl \
    gcc \
    gcc-c++ \
    gflags-devel \
    git \
    glibc-devel \
    glibc-headers \
    glog-devel \
    hiredis-devel \
    libstdc++-devel \
    libsndfile-devel \
    make \
    opencv-devel \
    sudo \
    wget \
    vim

  # Cleanup
  yum clean all
  rm -rf /var/cache/yum
  rm -rf /var/lib/yum/yumdb
  rm -rf /var/lib/yum/history
}

# Install base packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
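# Example (hypothetical): on an Ubuntu base image /etc/os-release contains a
# line `ID=ubuntu`, so the Perl-style lookbehind in grep extracts "ubuntu"
# (quotes stripped by tr) and the case statement below dispatches to
# install_ubuntu.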
case "$ID" in
|
||||
ubuntu)
|
||||
install_ubuntu
|
||||
;;
|
||||
centos)
|
||||
install_centos
|
||||
;;
|
||||
*)
|
||||
echo "Unable to determine OS..."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Install Valgrind separately since the apt-get version is too old.
|
||||
mkdir valgrind_build && cd valgrind_build
|
||||
VALGRIND_VERSION=3.16.1
|
||||
if ! wget http://valgrind.org/downloads/valgrind-${VALGRIND_VERSION}.tar.bz2
|
||||
then
|
||||
wget https://sourceware.org/ftp/valgrind/valgrind-${VALGRIND_VERSION}.tar.bz2
|
||||
fi
|
||||
tar -xjf valgrind-${VALGRIND_VERSION}.tar.bz2
|
||||
cd valgrind-${VALGRIND_VERSION}
|
||||
./configure --prefix=/usr/local
|
||||
make -j 4
|
||||
sudo make install
|
||||
cd ../../
|
||||
rm -rf valgrind_build
|
||||
alias valgrind="/usr/local/bin/valgrind"
|
||||
@@ -1,25 +0,0 @@
#!/bin/bash

set -ex

git clone https://github.com/driazati/breakpad.git
pushd breakpad

# breakpad has no actual releases, so this is pinned to the top commit from
# main when this was forked (including the one patch commit). This uses a fork
# of the breakpad mainline that automatically daisy-chains out to any previously
# installed signal handlers (instead of overwriting them).
git checkout 5485e473ed46d065e05489e50dfc59d90dfd7e22

git clone https://chromium.googlesource.com/linux-syscall-support src/third_party/lss
pushd src/third_party/lss
# same as with breakpad, there are no real releases for this repo so use a
# commit as the pin
git checkout e1e7b0ad8ee99a875b272c8e33e308472e897660
popd

./configure
make
make install
popd
rm -rf breakpad
@@ -1,117 +0,0 @@
#!/bin/bash

set -ex

install_ubuntu() {
  echo "Preparing to build sccache from source"
  apt-get update
  apt-get install -y cargo pkg-config libssl-dev
  echo "Checking out sccache repo"
  git clone https://github.com/pytorch/sccache
  cd sccache
  echo "Building sccache"
  cargo build --release
  cp target/release/sccache /opt/cache/bin
  echo "Cleaning up"
  cd ..
  rm -rf sccache
  apt-get remove -y cargo rustc
  apt-get autoclean && apt-get clean
}

install_binary() {
  echo "Downloading sccache binary from S3 repo"
  curl --retry 3 https://s3.amazonaws.com/ossci-linux/sccache -o /opt/cache/bin/sccache
}

mkdir -p /opt/cache/bin
mkdir -p /opt/cache/lib
sed -e 's|PATH="\(.*\)"|PATH="/opt/cache/bin:\1"|g' -i /etc/environment
export PATH="/opt/cache/bin:$PATH"

# Setup compiler cache
if [ -n "$ROCM_VERSION" ]; then
  curl --retry 3 http://repo.radeon.com/misc/.sccache_amd/sccache -o /opt/cache/bin/sccache
else
  ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
  case "$ID" in
    ubuntu)
      install_ubuntu
      ;;
    *)
      install_binary
      ;;
  esac
fi
chmod a+x /opt/cache/bin/sccache

function write_sccache_stub() {
  printf "#!/bin/sh\nif [ \$(ps -p \$PPID -o comm=) != sccache ]; then\n exec sccache $(which $1) \"\$@\"\nelse\n exec $(which $1) \"\$@\"\nfi" > "/opt/cache/bin/$1"
  chmod a+x "/opt/cache/bin/$1"
}

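# For reference (a sketch, assuming `which gcc` resolves to /usr/bin/gcc):
# the stub written to /opt/cache/bin/gcc expands to
#   #!/bin/sh
#   if [ $(ps -p $PPID -o comm=) != sccache ]; then
#    exec sccache /usr/bin/gcc "$@"
#   else
#    exec /usr/bin/gcc "$@"
#   fi
# i.e. compiler invocations are routed through sccache unless sccache itself
# is the parent process, which prevents infinite recursion when sccache
# re-invokes the wrapped compiler.
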
write_sccache_stub cc
write_sccache_stub c++
write_sccache_stub gcc
write_sccache_stub g++

# NOTE: See specific ROCM_VERSION case below.
if [ "x$ROCM_VERSION" = x ]; then
  write_sccache_stub clang
  write_sccache_stub clang++
fi

if [ -n "$CUDA_VERSION" ]; then
  # TODO: This is a workaround for the fact that PyTorch's FindCUDA
  # implementation cannot find nvcc if it is setup this way, because it
  # appears to search for the nvcc in PATH, and use its path to infer
  # where CUDA is installed. Instead, we install an nvcc symlink outside
  # of the PATH, and set CUDA_NVCC_EXECUTABLE so that we make use of it.

  write_sccache_stub nvcc
  mv /opt/cache/bin/nvcc /opt/cache/lib/
fi

if [ -n "$ROCM_VERSION" ]; then
  # ROCm compiler is hcc or clang. However, it is commonly invoked via hipcc wrapper.
  # hipcc will call either hcc or clang using an absolute path starting with /opt/rocm,
  # causing the /opt/cache/bin to be skipped. We must create the sccache wrappers
  # directly under /opt/rocm while also preserving the original compiler names.
  # Note symlinks will chain as follows: [hcc or clang++] -> clang -> clang-??
  # Final link in symlink chain must point back to original directory.

  # Original compiler is moved one directory deeper. Wrapper replaces it.
  function write_sccache_stub_rocm() {
    OLDCOMP=$1
    COMPNAME=$(basename $OLDCOMP)
    TOPDIR=$(dirname $OLDCOMP)
    WRAPPED="$TOPDIR/original/$COMPNAME"
    mv "$OLDCOMP" "$WRAPPED"
    printf "#!/bin/sh\nexec sccache $WRAPPED \"\$@\"" > "$OLDCOMP"
    chmod a+x "$OLDCOMP"
  }

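  # Sketch of the effect (paths illustrative): calling
  #   write_sccache_stub_rocm /opt/rocm/llvm/bin/clang
  # moves the real compiler to /opt/rocm/llvm/bin/original/clang and leaves a
  # two-line wrapper at the old path:
  #   #!/bin/sh
  #   exec sccache /opt/rocm/llvm/bin/original/clang "$@"
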
if [[ -e "/opt/rocm/hcc/bin/hcc" ]]; then
|
||||
# ROCm 3.3 or earlier.
|
||||
mkdir /opt/rocm/hcc/bin/original
|
||||
write_sccache_stub_rocm /opt/rocm/hcc/bin/hcc
|
||||
write_sccache_stub_rocm /opt/rocm/hcc/bin/clang
|
||||
write_sccache_stub_rocm /opt/rocm/hcc/bin/clang++
|
||||
# Fix last link in symlink chain, clang points to versioned clang in prior dir
|
||||
pushd /opt/rocm/hcc/bin/original
|
||||
ln -s ../$(readlink clang)
|
||||
popd
|
||||
elif [[ -e "/opt/rocm/llvm/bin/clang" ]]; then
|
||||
# ROCm 3.5 and beyond.
|
||||
mkdir /opt/rocm/llvm/bin/original
|
||||
write_sccache_stub_rocm /opt/rocm/llvm/bin/clang
|
||||
write_sccache_stub_rocm /opt/rocm/llvm/bin/clang++
|
||||
# Fix last link in symlink chain, clang points to versioned clang in prior dir
|
||||
pushd /opt/rocm/llvm/bin/original
|
||||
ln -s ../$(readlink clang)
|
||||
popd
|
||||
else
|
||||
echo "Cannot find ROCm compiler."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
@@ -1,44 +0,0 @@
#!/bin/bash

set -ex

if [ -n "$CLANG_VERSION" ]; then

  if [[ $CLANG_VERSION == 7 && $UBUNTU_VERSION == 16.04 ]]; then
    wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
    sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main"
  elif [[ $CLANG_VERSION == 9 && $UBUNTU_VERSION == 18.04 ]]; then
    sudo apt-get update
    # gpg-agent is not available by default on 18.04
    sudo apt-get install -y --no-install-recommends gpg-agent
    wget --no-check-certificate -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
    apt-add-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-${CLANG_VERSION} main"
  fi

  sudo apt-get update
  apt-get install -y --no-install-recommends clang-"$CLANG_VERSION"
  apt-get install -y --no-install-recommends llvm-"$CLANG_VERSION"

  # Install dev version of LLVM.
  if [ -n "$LLVMDEV" ]; then
    sudo apt-get install -y --no-install-recommends llvm-"$CLANG_VERSION"-dev
  fi

  # Use update-alternatives to make this version the default
  # TODO: Decide if overriding gcc as well is a good idea
  # update-alternatives --install /usr/bin/gcc gcc /usr/bin/clang-"$CLANG_VERSION" 50
  # update-alternatives --install /usr/bin/g++ g++ /usr/bin/clang++-"$CLANG_VERSION" 50
  update-alternatives --install /usr/bin/clang clang /usr/bin/clang-"$CLANG_VERSION" 50
  update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-"$CLANG_VERSION" 50

  # clang's packaging is a little messed up (the runtime libs aren't
  # added into the linker path), so give it a little help
  clang_lib=("/usr/lib/llvm-$CLANG_VERSION/lib/clang/"*"/lib/linux")
  echo "$clang_lib" > /etc/ld.so.conf.d/clang.conf
  ldconfig

  # Cleanup package manager
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

fi
@@ -1,139 +0,0 @@
#!/bin/bash

set -ex

# Optionally install conda
if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
  BASE_URL="https://repo.anaconda.com/miniconda"

  MAJOR_PYTHON_VERSION=$(echo "$ANACONDA_PYTHON_VERSION" | cut -d . -f 1)

  case "$MAJOR_PYTHON_VERSION" in
    2)
      CONDA_FILE="Miniconda2-latest-Linux-x86_64.sh"
      ;;
    3)
      CONDA_FILE="Miniconda3-latest-Linux-x86_64.sh"
      ;;
    *)
      echo "Unsupported ANACONDA_PYTHON_VERSION: $ANACONDA_PYTHON_VERSION"
      exit 1
      ;;
  esac

  mkdir /opt/conda
  chown jenkins:jenkins /opt/conda

  # Work around bug where devtoolset replaces sudo and breaks it.
  if [ -n "$DEVTOOLSET_VERSION" ]; then
    SUDO=/bin/sudo
  else
    SUDO=sudo
  fi

  as_jenkins() {
    # NB: unsetting the environment variables works around a conda bug
    # https://github.com/conda/conda/issues/6576
    # NB: Pass on PATH and LD_LIBRARY_PATH to sudo invocation
    # NB: This must be run from a directory that jenkins has access to,
    # works around https://github.com/conda/conda-package-handling/pull/34
    $SUDO -H -u jenkins env -u SUDO_UID -u SUDO_GID -u SUDO_COMMAND -u SUDO_USER env "PATH=$PATH" "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" $*
  }

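  # Example (hypothetical): `as_jenkins conda update -y -n base conda` runs
  # the update as the jenkins user, scrubbing the SUDO_* variables that
  # confuse conda while still passing PATH and LD_LIBRARY_PATH through.
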
  pushd /tmp
  wget -q "${BASE_URL}/${CONDA_FILE}"
  chmod +x "${CONDA_FILE}"
  as_jenkins ./"${CONDA_FILE}" -b -f -p "/opt/conda"
  popd

  # NB: Don't do this, rely on the rpath to get it right
  #echo "/opt/conda/lib" > /etc/ld.so.conf.d/conda-python.conf
  #ldconfig
  sed -e 's|PATH="\(.*\)"|PATH="/opt/conda/bin:\1"|g' -i /etc/environment
  export PATH="/opt/conda/bin:$PATH"

  # Ensure we run conda in a directory that jenkins has write access to
  pushd /opt/conda

  # Track latest conda update
  as_jenkins conda update -y -n base conda

  # Install correct Python version
  as_jenkins conda install -y python="$ANACONDA_PYTHON_VERSION"

  conda_install() {
    # Ensure that the install command doesn't upgrade/downgrade Python
    # This should be called as
    #   conda_install pkg1 pkg2 ... [-c channel]
    as_jenkins conda install -q -y python="$ANACONDA_PYTHON_VERSION" $*
  }

  # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
  # DO NOT install cmake here as it would install a version newer than 3.5, but
  # we want to pin to version 3.5.
  SCIPY_VERSION=1.1.0
  if [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
    # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
    conda_install numpy=1.19.2 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0 -c conda-forge
    SCIPY_VERSION=1.6.0
  elif [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then
    # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
    conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0
  elif [ "$ANACONDA_PYTHON_VERSION" = "3.7" ]; then
    # DO NOT install dataclasses if installing python-3.7, since it's part of python-3.7 core packages
    conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six typing_extensions
  else
    conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six dataclasses typing_extensions
  fi

  if [[ "$CUDA_VERSION" == 10.0* ]]; then
    conda_install magma-cuda100 -c pytorch
  elif [[ "$CUDA_VERSION" == 10.1* ]]; then
    conda_install magma-cuda101 -c pytorch
  elif [[ "$CUDA_VERSION" == 10.2* ]]; then
    conda_install magma-cuda102 -c pytorch
  elif [[ "$CUDA_VERSION" == 11.0* ]]; then
    conda_install magma-cuda110 -c pytorch
  elif [[ "$CUDA_VERSION" == 11.1* ]]; then
    conda_install magma-cuda111 -c pytorch
  elif [[ "$CUDA_VERSION" == 11.3* ]]; then
    conda_install magma-cuda113 -c pytorch
  fi

  # TODO: This isn't working atm
  conda_install nnpack -c killeent

  # Install some other packages, including those needed for Python test reporting
  # TODO: Why is scipy pinned
  # Pin MyPy version because new errors are likely to appear with each release
  # Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
  # Pin coverage so we can use COVERAGE_RCFILE
  as_jenkins pip install --progress-bar off pytest \
    scipy==$SCIPY_VERSION \
    scikit-image \
    psutil \
    unittest-xml-reporting \
    boto3==1.16.34 \
    coverage==5.5 \
    hypothesis==4.53.2 \
    mypy==0.812 \
    tb-nightly

  # Install numba only on python-3.8 or below
  # For numba issue see https://github.com/pytorch/pytorch/issues/51511
  if [[ $(python -c "import sys; print(int(sys.version_info < (3, 9)))") == "1" ]]; then
    as_jenkins pip install --progress-bar off numba librosa>=0.6.2
  else
    as_jenkins pip install --progress-bar off numba==0.49.0 librosa>=0.6.2
  fi

  # Update scikit-learn to a python-3.8 compatible version
  if [[ $(python -c "import sys; print(int(sys.version_info >= (3, 8)))") == "1" ]]; then
    as_jenkins pip install --progress-bar off -U scikit-learn
  else
    # Pinned scikit-learn due to https://github.com/scikit-learn/scikit-learn/issues/14485 (affects gcc 5.5 only)
    as_jenkins pip install --progress-bar off scikit-learn==0.20.3
  fi

  popd
fi
@@ -1,66 +0,0 @@
#!/bin/bash

set -ex

# This function installs protobuf 2.6
install_protobuf_26() {
  pb_dir="/usr/temp_pb_install_dir"
  mkdir -p $pb_dir

  # On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or
  # else it will fail with
  #   g++: error: ./../lib64/crti.o: No such file or directory
  ln -s /usr/lib64 "$pb_dir/lib64"

  curl -LO "https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz"
  tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-2.6.1.tar.gz
  pushd "$pb_dir" && ./configure && make && make check && sudo make install && sudo ldconfig
  popd
  rm -rf $pb_dir
}

install_ubuntu() {
  apt-get update
  apt-get install -y --no-install-recommends \
    libhiredis-dev \
    libleveldb-dev \
    liblmdb-dev \
    libsnappy-dev

  # Cleanup
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {
  # Need EPEL for many packages we depend on.
  # See http://fedoraproject.org/wiki/EPEL
  yum --enablerepo=extras install -y epel-release

  yum install -y \
    hiredis-devel \
    leveldb-devel \
    lmdb-devel \
    snappy-devel

  # Cleanup
  yum clean all
  rm -rf /var/cache/yum
  rm -rf /var/lib/yum/yumdb
  rm -rf /var/lib/yum/history
}

# Install base packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
  ubuntu)
    install_ubuntu
    ;;
  centos)
    install_centos
    ;;
  *)
    echo "Unable to determine OS..."
    exit 1
    ;;
esac
@@ -1,24 +0,0 @@
#!/bin/bash

set -ex

if [ -n "$GCC_VERSION" ]; then

  # Need the official toolchain repo to get alternate packages
  add-apt-repository ppa:ubuntu-toolchain-r/test
  apt-get update
  if [ "$UBUNTU_VERSION" = "16.04" -a "$GCC_VERSION" = "5" ]; then
    apt-get install -y g++-5=5.4.0-6ubuntu1~16.04.12
  else
    apt-get install -y g++-$GCC_VERSION
  fi

  update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50
  update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50
  update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50

  # Cleanup package manager
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

fi
@@ -1,6 +0,0 @@
#!/bin/bash

set -ex

mkdir -p /usr/local/include
cp jni.h /usr/local/include
@@ -1,20 +0,0 @@
#!/bin/bash

set -ex

if [ -n "$KATEX" ]; then

  curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
  sudo apt-get install -y nodejs

  curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
  echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list

  apt-get update
  apt-get install -y --no-install-recommends yarn
  yarn global add katex --prefix /usr/local

  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

fi
@@ -1,8 +0,0 @@
#!/bin/bash

set -ex

git clone --branch v1.15 https://github.com/linux-test-project/lcov.git
pushd lcov
sudo make install  # will be installed in /usr/local/bin/lcov
popd
@@ -1,4 +0,0 @@
#!/bin/bash

sudo apt-get -qq update
sudo apt-get -qq install --allow-downgrades --allow-change-held-packages libnccl-dev=2.5.6-1+cuda10.1 libnccl2=2.5.6-1+cuda10.1
@@ -1,13 +0,0 @@
#!/bin/bash

set -ex

[ -n "$NINJA_VERSION" ]

url="https://github.com/ninja-build/ninja/releases/download/v${NINJA_VERSION}/ninja-linux.zip"

pushd /tmp
wget --no-verbose --output-document=ninja-linux.zip "$url"
unzip ninja-linux.zip -d /usr/local/bin
rm -f ninja-linux.zip
popd
@@ -1,4 +0,0 @@
#!/bin/bash

sudo apt-get update
sudo apt-get install -y --allow-downgrades --allow-change-held-packages openmpi-bin libopenmpi-dev
@@ -1,14 +0,0 @@
#!/bin/bash

set -ex

OPENSSL=openssl-1.1.1k

wget -q -O "${OPENSSL}.tar.gz" "https://www.openssl.org/source/${OPENSSL}.tar.gz"
tar xf "${OPENSSL}.tar.gz"
cd "${OPENSSL}"
./config --prefix=/opt/openssl -d '-Wl,--enable-new-dtags,-rpath,$(LIBRPATH)'
# NOTE: openssl errors out when built with the -j option
make install_sw
cd ..
rm -rf "${OPENSSL}"
@@ -1,61 +0,0 @@
#!/bin/bash

set -ex

# This function installs protobuf 2.6
install_protobuf_26() {
  pb_dir="/usr/temp_pb_install_dir"
  mkdir -p $pb_dir

  # On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or
  # else it will fail with
  #   g++: error: ./../lib64/crti.o: No such file or directory
  ln -s /usr/lib64 "$pb_dir/lib64"

  curl -LO "https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz"
  tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-2.6.1.tar.gz
  pushd "$pb_dir" && ./configure && make && make check && sudo make install && sudo ldconfig
  popd
  rm -rf $pb_dir
}

install_ubuntu() {
  # Ubuntu 14.04 ships with protobuf 2.5, but ONNX needs protobuf >= 2.6
  # so we install that here if on 14.04
  # Ubuntu 14.04 also has cmake 2.8.12 as the default option, so we will
  # install cmake3 here and use cmake3.
  apt-get update
  if [[ "$UBUNTU_VERSION" == 14.04 ]]; then
    apt-get install -y --no-install-recommends cmake3
    install_protobuf_26
  else
    apt-get install -y --no-install-recommends \
      libprotobuf-dev \
      protobuf-compiler
  fi

  # Cleanup
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {
  # CentOS 7 ships with protobuf 2.5, but ONNX needs protobuf >= 2.6,
  # so we always install it from source here.
  install_protobuf_26
}

# Install base packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
  ubuntu)
    install_ubuntu
    ;;
  centos)
    install_centos
    ;;
  *)
    echo "Unable to determine OS..."
    exit 1
    ;;
esac
@@ -1,123 +0,0 @@
#!/bin/bash

set -ex

install_magma() {
  # "install" hipMAGMA into /opt/rocm/magma by copying after build
  git clone https://bitbucket.org/icl/magma.git
  pushd magma
  git checkout 878b1ce02e9cfe4a829be22c8f911e9c0b6bd88f
  cp make.inc-examples/make.inc.hip-gcc-mkl make.inc
  echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc
  echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc
  echo 'DEVCCFLAGS += --amdgpu-target=gfx803 --amdgpu-target=gfx900 --amdgpu-target=gfx906 --amdgpu-target=gfx908 --gpu-max-threads-per-block=256' >> make.inc
  # hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition
  sed -i 's/^FOPENMP/#FOPENMP/g' make.inc
  export PATH="${PATH}:/opt/rocm/bin"
  make -f make.gen.hipMAGMA -j $(nproc)
  LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda
  make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda
  popd
  mv magma /opt/rocm
}

ver() {
  printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
}

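# Example (for illustration): ver pads each dotted component to a fixed width
# so that plain integer comparison orders versions correctly, e.g.
#   ver 4.0.1 -> "  4000001000"
#   ver 4.2   -> "  4002000000"
# hence [[ $(ver 4.0.1) -lt $(ver 4.2) ]] holds in install_ubuntu below.
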
install_ubuntu() {
  apt-get update
  if [[ $UBUNTU_VERSION == 18.04 ]]; then
    # gpg-agent is not available by default on 18.04
    apt-get install -y --no-install-recommends gpg-agent
  fi
  apt-get install -y kmod
  apt-get install -y wget

  # Need the libc++1 and libc++abi1 libraries to allow torch._C to load at runtime
  apt-get install -y libc++1
  apt-get install -y libc++abi1

  ROCM_REPO="ubuntu"
  if [[ $(ver $ROCM_VERSION) -lt $(ver 4.2) ]]; then
    ROCM_REPO="xenial"
  fi

  # Add rocm repository
  wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -
  echo "deb [arch=amd64] http://repo.radeon.com/rocm/apt/${ROCM_VERSION} ${ROCM_REPO} main" > /etc/apt/sources.list.d/rocm.list
  apt-get update --allow-insecure-repositories

  DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
    rocm-dev \
    rocm-utils \
    rocm-libs \
    rccl \
    rocprofiler-dev \
    roctracer-dev

  # precompiled miopen kernels added in ROCm 3.5; search for all unversioned packages
  # if search fails it will abort this script; use true to avoid case where search fails
  MIOPENKERNELS=$(apt-cache search --names-only miopenkernels | awk '{print $1}' | grep -F -v . || true)
  if [[ "x${MIOPENKERNELS}" = x ]]; then
    echo "miopenkernels package not available"
  else
    DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ${MIOPENKERNELS}
  fi

  install_magma

  # Cleanup
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {

  yum update -y
  yum install -y kmod
  yum install -y wget
  yum install -y openblas-devel

  yum install -y epel-release
  yum install -y dkms kernel-headers-`uname -r` kernel-devel-`uname -r`

  echo "[ROCm]" > /etc/yum.repos.d/rocm.repo
  echo "name=ROCm" >> /etc/yum.repos.d/rocm.repo
  echo "baseurl=http://repo.radeon.com/rocm/yum/${ROCM_VERSION}" >> /etc/yum.repos.d/rocm.repo
  echo "enabled=1" >> /etc/yum.repos.d/rocm.repo
  echo "gpgcheck=0" >> /etc/yum.repos.d/rocm.repo

  yum update -y

  yum install -y \
    rocm-dev \
    rocm-utils \
    rocm-libs \
    rccl \
    rocprofiler-dev \
    roctracer-dev

  install_magma

  # Cleanup
  yum clean all
  rm -rf /var/cache/yum
  rm -rf /var/lib/yum/yumdb
  rm -rf /var/lib/yum/history
}

# Install ROCm packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
  ubuntu)
    install_ubuntu
    ;;
  centos)
    install_centos
    ;;
  *)
    echo "Unable to determine OS..."
    exit 1
    ;;
esac
@@ -1,24 +0,0 @@
#!/bin/bash

set -ex

[ -n "${SWIFTSHADER}" ]

retry () {
  $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}

_https_amazon_aws=https://ossci-android.s3.amazonaws.com

# SwiftShader
_swiftshader_dir=/var/lib/jenkins/swiftshader
_swiftshader_file_targz=swiftshader-abe07b943-prebuilt.tar.gz
mkdir -p $_swiftshader_dir
_tmp_swiftshader_targz="/tmp/${_swiftshader_file_targz}"

curl --silent --show-error --location --fail --retry 3 \
  --output "${_tmp_swiftshader_targz}" "$_https_amazon_aws/${_swiftshader_file_targz}"

tar -C "${_swiftshader_dir}" -xzf "${_tmp_swiftshader_targz}"

export VK_ICD_FILENAMES="${_swiftshader_dir}/build/Linux/vk_swiftshader_icd.json"
@@ -1,14 +0,0 @@
apt-get update
apt-get install -y sudo wget libboost-dev libboost-test-dev libboost-program-options-dev libboost-filesystem-dev libboost-thread-dev libevent-dev automake libtool flex bison pkg-config g++ libssl-dev
wget https://www-us.apache.org/dist/thrift/0.12.0/thrift-0.12.0.tar.gz
tar -xvf thrift-0.12.0.tar.gz
cd thrift-0.12.0
for file in ./compiler/cpp/Makefile*; do
  sed -i 's/\-Werror//' $file
done
./bootstrap.sh
./configure --without-php --without-java --without-python --without-nodejs --without-go --without-ruby
sudo make
sudo make install
cd ..
rm thrift-0.12.0.tar.gz
@@ -1,20 +0,0 @@
#!/bin/bash

set -ex

# Mirror jenkins user in container
echo "jenkins:x:1014:1014::/var/lib/jenkins:" >> /etc/passwd
echo "jenkins:x:1014:" >> /etc/group

# Create $HOME
mkdir -p /var/lib/jenkins
chown jenkins:jenkins /var/lib/jenkins
mkdir -p /var/lib/jenkins/.ccache
chown jenkins:jenkins /var/lib/jenkins/.ccache

# Allow writing to /usr/local (for make install)
chown jenkins:jenkins /usr/local

# Allow sudo
# TODO: Maybe we shouldn't
echo 'jenkins ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/jenkins
@@ -1,62 +0,0 @@
#!/bin/bash

set -ex

# This function installs protobuf 2.6
install_protobuf_26() {
  pb_dir="/usr/temp_pb_install_dir"
  mkdir -p $pb_dir

  # On the nvidia/cuda:9-cudnn7-devel-centos7 image we need this symlink or
  # else it will fail with
  #   g++: error: ./../lib64/crti.o: No such file or directory
  ln -s /usr/lib64 "$pb_dir/lib64"

  curl -LO "https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz"
  tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-2.6.1.tar.gz
  pushd "$pb_dir" && ./configure && make && make check && sudo make install && sudo ldconfig
  popd
  rm -rf $pb_dir
}

install_ubuntu() {
  apt-get update
  apt-get install -y --no-install-recommends \
    libopencv-dev \
    libavcodec-dev

  # Cleanup
  apt-get autoclean && apt-get clean
  rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
}

install_centos() {
  # Need EPEL for many packages we depend on.
  # See http://fedoraproject.org/wiki/EPEL
  yum --enablerepo=extras install -y epel-release

  yum install -y \
    opencv-devel \
    ffmpeg-devel

  # Cleanup
  yum clean all
  rm -rf /var/cache/yum
  rm -rf /var/lib/yum/yumdb
  rm -rf /var/lib/yum/history
}

# Install base packages depending on the base OS
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
case "$ID" in
  ubuntu)
    install_ubuntu
    ;;
  centos)
    install_centos
    ;;
  *)
    echo "Unable to determine OS..."
    exit 1
    ;;
esac
@@ -1,24 +0,0 @@
#!/bin/bash

set -ex

[ -n "${VULKAN_SDK_VERSION}" ]

retry () {
  $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}

_vulkansdk_dir=/var/lib/jenkins/vulkansdk
_tmp_vulkansdk_targz=/tmp/vulkansdk.tar.gz

curl \
  --silent \
  --show-error \
  --location \
  --fail \
  --retry 3 \
  --output "${_tmp_vulkansdk_targz}" "https://ossci-android.s3.amazonaws.com/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz"

mkdir -p "${_vulkansdk_dir}"
tar -C "${_vulkansdk_dir}" -xzf "${_tmp_vulkansdk_targz}" --strip-components 1
rm -rf "${_tmp_vulkansdk_targz}"

File diff suppressed because it is too large
@@ -1,101 +0,0 @@
ARG UBUNTU_VERSION
ARG CUDA_VERSION
ARG CUDNN_VERSION

FROM nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}

ARG UBUNTU_VERSION
ARG CUDA_VERSION
ARG CUDNN_VERSION

ENV DEBIAN_FRONTEND noninteractive

# Install common dependencies (so that this step can be cached separately)
ARG EC2
ADD ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install user
ADD ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install katex
ARG KATEX
ADD ./common/install_katex.sh install_katex.sh
RUN bash ./install_katex.sh && rm install_katex.sh

# Install conda and other packages (e.g., numpy, coverage, pytest)
ENV PATH /opt/conda/bin:$PATH
ARG ANACONDA_PYTHON_VERSION
ADD ./common/install_conda.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh

# Install gcc
ARG GCC_VERSION
ADD ./common/install_gcc.sh install_gcc.sh
RUN bash ./install_gcc.sh && rm install_gcc.sh

# Install clang
ARG CLANG_VERSION
ADD ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh

# (optional) Install protobuf for ONNX
ARG PROTOBUF
ADD ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}

# (optional) Install database packages like LMDB and LevelDB
ARG DB
ADD ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
ADD ./common/install_vision.sh install_vision.sh
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh
ENV INSTALLED_VISION ${VISION}

# Install ccache/sccache (do this last, so we get priority in PATH)
ADD ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
RUN bash ./install_cache.sh && rm install_cache.sh
ENV CUDA_NVCC_EXECUTABLE=/opt/cache/lib/nvcc

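# Note (explanatory, assuming the behavior of install_cache.sh above): the
# sccache-wrapped nvcc is parked in /opt/cache/lib, outside PATH, so CMake's
# FindCUDA still infers the CUDA root from the real nvcc on PATH while
# CUDA_NVCC_EXECUTABLE routes actual compilations through the cached wrapper.
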
# Add jni.h for java host build
ADD ./common/install_jni.sh install_jni.sh
ADD ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh

# Install NCCL for when CUDA is version 10.1
ADD ./common/install_nccl.sh install_nccl.sh
RUN if [ "${CUDA_VERSION}" = 10.1 ]; then bash ./install_nccl.sh; fi
RUN rm install_nccl.sh

# Install Open MPI for CUDA
ADD ./common/install_openmpi.sh install_openmpi.sh
RUN if [ -n "${CUDA_VERSION}" ]; then bash install_openmpi.sh; fi
RUN rm install_openmpi.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

# AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"

# Install LLVM dev version (Defined in the pytorch/builder github repository)
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm

ADD ./common/install_openssl.sh install_openssl.sh
ENV OPENSSL_ROOT_DIR /opt/openssl
RUN bash ./install_openssl.sh

USER jenkins
CMD ["bash"]
@@ -1,92 +0,0 @@
ARG UBUNTU_VERSION

FROM ubuntu:${UBUNTU_VERSION}

ARG UBUNTU_VERSION

ENV DEBIAN_FRONTEND noninteractive

# Install common dependencies (so that this step can be cached separately)
ARG EC2
ADD ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install clang
ARG LLVMDEV
ARG CLANG_VERSION
ADD ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh

# Install user
ADD ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install conda and other packages (e.g., numpy, coverage, pytest)
ENV PATH /opt/conda/bin:$PATH
ARG ANACONDA_PYTHON_VERSION
ADD ./common/install_conda.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh

# Install gcc
ARG GCC_VERSION
ADD ./common/install_gcc.sh install_gcc.sh
RUN bash ./install_gcc.sh && rm install_gcc.sh

# (optional) Install protobuf for ONNX
ARG PROTOBUF
ADD ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}

# (optional) Install database packages like LMDB and LevelDB
ARG DB
ADD ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
ADD ./common/install_vision.sh install_vision.sh
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh
ENV INSTALLED_VISION ${VISION}

# Install rocm
ARG ROCM_VERSION
ADD ./common/install_rocm.sh install_rocm.sh
RUN bash ./install_rocm.sh
RUN rm install_rocm.sh
ENV PATH /opt/rocm/bin:$PATH
ENV PATH /opt/rocm/hcc/bin:$PATH
ENV PATH /opt/rocm/hip/bin:$PATH
ENV PATH /opt/rocm/opencl/bin:$PATH
ENV PATH /opt/rocm/llvm/bin:$PATH
ENV MAGMA_HOME /opt/rocm/magma
ENV LANG C.UTF-8
ENV LC_ALL C.UTF-8

# (optional) Install non-default CMake version
ARG CMAKE_VERSION
ADD ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh

# (optional) Install non-default Ninja version
ARG NINJA_VERSION
ADD ./common/install_ninja.sh install_ninja.sh
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
RUN rm install_ninja.sh

# Install ccache/sccache (do this last, so we get priority in PATH)
ADD ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
RUN bash ./install_cache.sh && rm install_cache.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

USER jenkins
CMD ["bash"]
@ -1,138 +0,0 @@
ARG UBUNTU_VERSION

FROM ubuntu:${UBUNTU_VERSION}

ARG UBUNTU_VERSION

ENV DEBIAN_FRONTEND noninteractive

# Install common dependencies (so that this step can be cached separately)
ARG EC2
ADD ./common/install_base.sh install_base.sh
RUN bash ./install_base.sh && rm install_base.sh

# Install clang
ARG LLVMDEV
ARG CLANG_VERSION
ADD ./common/install_clang.sh install_clang.sh
RUN bash ./install_clang.sh && rm install_clang.sh

# (optional) Install thrift.
ARG THRIFT
ADD ./common/install_thrift.sh install_thrift.sh
RUN if [ -n "${THRIFT}" ]; then bash ./install_thrift.sh; fi
RUN rm install_thrift.sh
ENV INSTALLED_THRIFT ${THRIFT}

# Install user
ADD ./common/install_user.sh install_user.sh
RUN bash ./install_user.sh && rm install_user.sh

# Install katex
ARG KATEX
ADD ./common/install_katex.sh install_katex.sh
RUN bash ./install_katex.sh && rm install_katex.sh

# Install conda and other packages (e.g., numpy, coverage, pytest)
ENV PATH /opt/conda/bin:$PATH
ARG ANACONDA_PYTHON_VERSION
ADD ./common/install_conda.sh install_conda.sh
RUN bash ./install_conda.sh && rm install_conda.sh

# Install gcc
ARG GCC_VERSION
ADD ./common/install_gcc.sh install_gcc.sh
RUN bash ./install_gcc.sh && rm install_gcc.sh

# Install lcov for C++ code coverage
ADD ./common/install_lcov.sh install_lcov.sh
RUN bash ./install_lcov.sh && rm install_lcov.sh

# (optional) Install protobuf for ONNX
ARG PROTOBUF
ADD ./common/install_protobuf.sh install_protobuf.sh
RUN if [ -n "${PROTOBUF}" ]; then bash ./install_protobuf.sh; fi
RUN rm install_protobuf.sh
ENV INSTALLED_PROTOBUF ${PROTOBUF}

# (optional) Install database packages like LMDB and LevelDB
ARG DB
ADD ./common/install_db.sh install_db.sh
RUN if [ -n "${DB}" ]; then bash ./install_db.sh; fi
RUN rm install_db.sh
ENV INSTALLED_DB ${DB}

# (optional) Install vision packages like OpenCV and ffmpeg
ARG VISION
ADD ./common/install_vision.sh install_vision.sh
RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
RUN rm install_vision.sh
ENV INSTALLED_VISION ${VISION}

# (optional) Install Android NDK
ARG ANDROID
ARG ANDROID_NDK
ARG GRADLE_VERSION
ADD ./common/install_android.sh install_android.sh
ADD ./android/AndroidManifest.xml AndroidManifest.xml
ADD ./android/build.gradle build.gradle
RUN if [ -n "${ANDROID}" ]; then bash ./install_android.sh; fi
RUN rm install_android.sh
RUN rm AndroidManifest.xml
RUN rm build.gradle
ENV INSTALLED_ANDROID ${ANDROID}

# (optional) Install breakpad
ARG BREAKPAD
ADD ./common/install_breakpad.sh install_breakpad.sh
RUN if [ -n "${BREAKPAD}" ]; then bash ./install_breakpad.sh; fi
RUN rm install_breakpad.sh
ENV INSTALLED_BREAKPAD ${BREAKPAD}

# (optional) Install Vulkan SDK
ARG VULKAN_SDK_VERSION
ADD ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh
RUN if [ -n "${VULKAN_SDK_VERSION}" ]; then bash ./install_vulkan_sdk.sh; fi
RUN rm install_vulkan_sdk.sh

# (optional) Install swiftshader
ARG SWIFTSHADER
ADD ./common/install_swiftshader.sh install_swiftshader.sh
RUN if [ -n "${SWIFTSHADER}" ]; then bash ./install_swiftshader.sh; fi
RUN rm install_swiftshader.sh

# (optional) Install non-default CMake version
ARG CMAKE_VERSION
ADD ./common/install_cmake.sh install_cmake.sh
RUN if [ -n "${CMAKE_VERSION}" ]; then bash ./install_cmake.sh; fi
RUN rm install_cmake.sh

# (optional) Install non-default Ninja version
ARG NINJA_VERSION
ADD ./common/install_ninja.sh install_ninja.sh
RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
RUN rm install_ninja.sh

# Install ccache/sccache (do this last, so we get priority in PATH)
ADD ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
RUN bash ./install_cache.sh && rm install_cache.sh

# Add jni.h for java host build
ADD ./common/install_jni.sh install_jni.sh
ADD ./java/jni.h jni.h
RUN bash ./install_jni.sh && rm install_jni.sh

# Include BUILD_ENVIRONMENT environment variable in image
ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}

# Install LLVM dev version (Defined in the pytorch/builder github repository)
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm

ADD ./common/install_openssl.sh install_openssl.sh
RUN bash ./install_openssl.sh
ENV OPENSSL_ROOT_DIR /opt/openssl

USER jenkins
CMD ["bash"]
@ -1,13 +0,0 @@
FROM ubuntu:18.04

RUN apt-get update && apt-get install -y python3-pip git && rm -rf /var/lib/apt/lists/* /var/log/dpkg.log

ADD requirements.txt /requirements.txt

RUN pip3 install -r /requirements.txt

ADD gc.py /usr/bin/gc.py

ADD docker_hub.py /usr/bin/docker_hub.py

ENTRYPOINT ["/usr/bin/gc.py"]
@ -1,125 +0,0 @@
#!/usr/bin/env python3

from collections import namedtuple

import boto3
import requests
import os


IMAGE_INFO = namedtuple(
    "IMAGE_INFO", ("repo", "tag", "size", "last_updated_at", "last_updated_by")
)


def build_access_token(username, password):
    r = requests.post(
        "https://hub.docker.com/v2/users/login/",
        data={"username": username, "password": password},
    )
    r.raise_for_status()
    token = r.json().get("token")
    return {"Authorization": "JWT " + token}


def list_repos(user, token):
    r = requests.get("https://hub.docker.com/v2/repositories/" + user, headers=token)
    r.raise_for_status()
    ret = sorted(
        repo["user"] + "/" + repo["name"] for repo in r.json().get("results", [])
    )
    if ret:
        print("repos found:")
        print("".join("\n\t" + r for r in ret))
    return ret


def list_tags(repo, token):
    r = requests.get(
        "https://hub.docker.com/v2/repositories/" + repo + "/tags", headers=token
    )
    r.raise_for_status()
    return [
        IMAGE_INFO(
            repo=repo,
            tag=t["name"],
            size=t["full_size"],
            last_updated_at=t["last_updated"],
            last_updated_by=t["last_updater_username"],
        )
        for t in r.json().get("results", [])
    ]


def save_to_s3(tags):
    table_content = ""
    client = boto3.client("s3")
    for t in tags:
        table_content += (
            "<tr><td>{repo}</td><td>{tag}</td><td>{size}</td>"
            "<td>{last_updated_at}</td><td>{last_updated_by}</td></tr>"
        ).format(
            repo=t.repo,
            tag=t.tag,
            size=t.size,
            last_updated_at=t.last_updated_at,
            last_updated_by=t.last_updated_by,
        )
    html_body = """
    <html>
      <head>
        <link rel="stylesheet"
          href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
          integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh"
          crossorigin="anonymous">
        <link rel="stylesheet" type="text/css"
          href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.css">
        <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js">
        </script>
        <script type="text/javascript" charset="utf8"
          src="https://cdn.datatables.net/1.10.20/js/jquery.dataTables.js"></script>
        <title>docker image info</title>
      </head>
      <body>
        <table class="table table-striped table-hover" id="docker">
          <caption>Docker images on docker hub</caption>
          <thead class="thead-dark">
            <tr>
              <th scope="col">repo</th>
              <th scope="col">tag</th>
              <th scope="col">size</th>
              <th scope="col">last_updated_at</th>
              <th scope="col">last_updated_by</th>
            </tr>
          </thead>
          <tbody>
            {table_content}
          </tbody>
        </table>
      </body>
      <script>
        $(document).ready( function () {{
          $('#docker').DataTable({{paging: false}});
        }} );
      </script>
    </html>
    """.format(
        table_content=table_content
    )
    client.put_object(
        Bucket="docker.pytorch.org",
        ACL="public-read",
        Key="docker_hub.html",
        Body=html_body,
        ContentType="text/html",
    )


if __name__ == "__main__":
    username = os.environ.get("DOCKER_HUB_USERNAME")
    password = os.environ.get("DOCKER_HUB_PASSWORD")
    token = build_access_token(username, password)
    tags = []
    for repo in list_repos("pytorch", token):
        tags.extend(list_tags(repo, token))
    save_to_s3(tags)
@ -1,218 +0,0 @@
#!/usr/bin/env python3

import argparse
import boto3
import datetime
import pytz
import re
import sys


def save_to_s3(project, data):
    table_content = ""
    client = boto3.client("s3")
    for repo, tag, window, age, pushed in data:
        table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
            repo=repo, tag=tag, window=window, age=age, pushed=pushed
        )
    html_body = """
    <html>
      <head>
        <link rel="stylesheet"
          href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
          integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh"
          crossorigin="anonymous">
        <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.css">
        <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
        <script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.20/js/jquery.dataTables.js"></script>
        <title>{project} nightly and permanent docker image info</title>
      </head>
      <body>
        <table class="table table-striped table-hover" id="docker">
          <thead class="thead-dark">
            <tr>
              <th scope="col">repo</th>
              <th scope="col">tag</th>
              <th scope="col">keep window</th>
              <th scope="col">age</th>
              <th scope="col">pushed at</th>
            </tr>
          </thead>
          <tbody>
            {table_content}
          </tbody>
        </table>
      </body>
      <script>
        $(document).ready( function () {{
          $('#docker').DataTable({{paging: false}});
        }} );
      </script>
    </html>
    """.format(
        project=project, table_content=table_content
    )

    # for pytorch, the file can be found at
    # http://ossci-docker.s3-website.us-east-1.amazonaws.com/pytorch.html
    # and later we can configure docker.pytorch.org to point to that location

    client.put_object(
        Bucket="docker.pytorch.org",
        ACL="public-read",
        Key="{project}.html".format(project=project),
        Body=html_body,
        ContentType="text/html",
    )


def repos(client):
    paginator = client.get_paginator("describe_repositories")
    pages = paginator.paginate(registryId="308535385114")
    for page in pages:
        for repo in page["repositories"]:
            yield repo


def images(client, repository):
    paginator = client.get_paginator("describe_images")
    pages = paginator.paginate(
        registryId="308535385114", repositoryName=repository["repositoryName"]
    )
    for page in pages:
        for image in page["imageDetails"]:
            yield image


parser = argparse.ArgumentParser(description="Delete old Docker tags from registry")
parser.add_argument(
    "--dry-run", action="store_true", help="Dry run; print tags that would be deleted"
)
parser.add_argument(
    "--debug", action="store_true", help="Debug, print ignored / saved tags"
)
parser.add_argument(
    "--keep-stable-days",
    type=int,
    default=14,
    help="Days of stable Docker tags to keep (non per-build images)",
)
parser.add_argument(
    "--keep-unstable-days",
    type=int,
    default=1,
    help="Days of unstable Docker tags to keep (per-build images)",
)
parser.add_argument(
    "--filter-prefix",
    type=str,
    default="",
    help="Only run cleanup for repositories with this prefix",
)
parser.add_argument(
    "--ignore-tags",
    type=str,
    default="",
    help="Never cleanup these tags (comma separated)",
)
args = parser.parse_args()

if not args.ignore_tags or not args.filter_prefix:
    print(
        """
Missing required arguments --ignore-tags and --filter-prefix

You must specify --ignore-tags and --filter-prefix to avoid accidentally
pruning a stable Docker tag which is being actively used. This will
make you VERY SAD. So pay attention.

First, which filter-prefix do you want? The list of valid prefixes
is in jobs/private.groovy under the 'docker-registry-cleanup' job.
You probably want either pytorch or caffe2.

Second, which ignore-tags do you want? It should be whatever the most
up-to-date DockerVersion for the repository in question is. Follow
the imports of jobs/pytorch.groovy to find them.
"""
    )
    sys.exit(1)

client = boto3.client("ecr", region_name="us-east-1")
stable_window = datetime.timedelta(days=args.keep_stable_days)
unstable_window = datetime.timedelta(days=args.keep_unstable_days)
now = datetime.datetime.now(pytz.UTC)
ignore_tags = args.ignore_tags.split(",")


def chunks(chunkable, n):
    """Yield successive n-sized chunks from chunkable."""
    for i in range(0, len(chunkable), n):
        yield chunkable[i: i + n]
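# Example (illustrative): list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]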


SHA_PATTERN = re.compile(r'^[0-9a-f]{40}$')


def looks_like_git_sha(tag):
    """Returns a boolean to check if a tag looks like a git sha

    For reference a sha1 is 40 characters with only 0-9a-f and contains no
    "-" characters
    """
    return re.match(SHA_PATTERN, tag) is not None
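# e.g. (illustrative): looks_like_git_sha("ab" * 20) -> True (40 hex chars);
# looks_like_git_sha("v1.3.0") -> False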


stable_window_tags = []
for repo in repos(client):
    repositoryName = repo["repositoryName"]
    if not repositoryName.startswith(args.filter_prefix):
        continue

    # Keep list of image digests to delete for this repository
    digest_to_delete = []

    for image in images(client, repo):
        tags = image.get("imageTags")
        if not isinstance(tags, (list,)) or len(tags) == 0:
            continue
        created = image["imagePushedAt"]
        age = now - created
        for tag in tags:
            if any([
                    looks_like_git_sha(tag),
                    tag.isdigit(),
                    tag.count("-") == 4,  # TODO: Remove, this no longer applies as tags are now built using a SHA1
                    tag in ignore_tags]):
                window = stable_window
                if tag in ignore_tags:
                    stable_window_tags.append((repositoryName, tag, "", age, created))
                elif age < window:
                    stable_window_tags.append((repositoryName, tag, window, age, created))
            else:
                window = unstable_window

            if tag in ignore_tags or age < window:
                if args.debug:
                    print("Ignoring {}:{} (age: {})".format(repositoryName, tag, age))
                break
        else:
            for tag in tags:
                print("{}Deleting {}:{} (age: {})".format("(dry run) " if args.dry_run else "", repositoryName, tag, age))
            digest_to_delete.append(image["imageDigest"])
    if args.dry_run:
        if args.debug:
            print("Skipping actual deletion, moving on...")
    else:
        # Issue batch delete for all images to delete for this repository
        # Note that as of 2018-07-25, the maximum number of images you can
        # delete in a single batch is 100, so chunk our list into batches of
        # 100
        for c in chunks(digest_to_delete, 100):
            client.batch_delete_image(
                registryId="308535385114",
                repositoryName=repositoryName,
                imageIds=[{"imageDigest": digest} for digest in c],
            )

save_to_s3(args.filter_prefix, stable_window_tags)
@ -1,3 +0,0 @@
boto3
pytz
requests
@ -6,24 +6,13 @@ Please see README.md in this directory for details.
"""

import os
import shutil
import sys
from collections import namedtuple
import shutil
from collections import namedtuple, OrderedDict

import cimodel.data.binary_build_definitions as binary_build_definitions
import cimodel.data.pytorch_build_definitions as pytorch_build_definitions
import cimodel.data.simple.android_definitions
import cimodel.data.simple.bazel_definitions
import cimodel.data.simple.binary_smoketest
import cimodel.data.simple.docker_definitions
import cimodel.data.simple.ge_config_tests
import cimodel.data.simple.ios_definitions
import cimodel.data.simple.macos_definitions
import cimodel.data.simple.mobile_definitions
import cimodel.data.simple.nightly_android
import cimodel.data.simple.nightly_ios
import cimodel.data.simple.anaconda_prune_defintions
import cimodel.data.windows_build_definitions as windows_build_definitions
import cimodel.data.binary_build_definitions as binary_build_definitions
import cimodel.data.caffe2_build_definitions as caffe2_build_definitions
import cimodel.lib.miniutils as miniutils
import cimodel.lib.miniyaml as miniyaml
@ -32,7 +21,6 @@ class File(object):
    """
    Verbatim copy the contents of a file into config.yml
    """

    def __init__(self, filename):
        self.filename = filename
@ -41,7 +29,7 @@ class File(object):
            shutil.copyfileobj(fh, output_filehandle)


class FunctionGen(namedtuple("FunctionGen", "function depth")):
class FunctionGen(namedtuple('FunctionGen', 'function depth')):
    __slots__ = ()
@ -51,14 +39,15 @@ class Treegen(FunctionGen):
    """

    def write(self, output_filehandle):
        miniyaml.render(output_filehandle, self.function(), self.depth)
        build_dict = OrderedDict()
        self.function(build_dict)
        miniyaml.render(output_filehandle, build_dict, self.depth)


class Listgen(FunctionGen):
    """
    Insert the content of a YAML list into config.yml
    """

    def write(self, output_filehandle):
        miniyaml.render(output_filehandle, self.function(), self.depth)
@ -68,6 +57,7 @@ def horizontal_rule():

class Header(object):

    def __init__(self, title, summary=None):
        self.title = title
        self.summary_lines = summary or []
@ -80,103 +70,6 @@ class Header(object):
        for line in filter(None, lines):
            output_filehandle.write(line + "\n")

def filter_master_only_jobs(items):
    def _for_all_items(items, functor) -> None:
        if isinstance(items, list):
            for item in items:
                _for_all_items(item, functor)
        if isinstance(items, dict) and len(items) == 1:
            item_type, item = next(iter(items.items()))
            functor(item_type, item)

    def _is_master_item(item):
        filters = item.get('filters', None)
        branches = filters.get('branches', None) if filters is not None else None
        branches_only = branches.get('only', None) if branches is not None else None
        return 'master' in branches_only if branches_only is not None else False

    master_deps = set()

    def _save_requires_if_master(item_type, item):
        requires = item.get('requires', None)
        item_name = item.get("name", None)
        if not isinstance(requires, list):
            return
        if _is_master_item(item) or item_name in master_deps:
            master_deps.update([n.strip('"') for n in requires])

    def _do_filtering(items):
        if isinstance(items, list):
            rc = [_do_filtering(item) for item in items]
            return [item for item in rc if len(item if item is not None else []) > 0]
        assert isinstance(items, dict) and len(items) == 1
        item_type, item = next(iter(items.items()))
        item_name = item.get("name", None)
        item_name = item_name.strip('"') if item_name is not None else None
        if not _is_master_item(item) and item_name not in master_deps:
            return None
        if 'filters' in item:
            item = item.copy()
            item.pop('filters')
        return {item_type: item}

    # Scan dependencies twice to pick up nested required jobs,
    # i.e. jobs depending on jobs that master-only jobs depend on
    _for_all_items(items, _save_requires_if_master)
    _for_all_items(items, _save_requires_if_master)
    return _do_filtering(items)


def gen_build_workflows_tree():
    build_workflows_functions = [
        cimodel.data.simple.docker_definitions.get_workflow_jobs,
        pytorch_build_definitions.get_workflow_jobs,
        cimodel.data.simple.macos_definitions.get_workflow_jobs,
        cimodel.data.simple.android_definitions.get_workflow_jobs,
        cimodel.data.simple.ios_definitions.get_workflow_jobs,
        cimodel.data.simple.mobile_definitions.get_workflow_jobs,
        cimodel.data.simple.ge_config_tests.get_workflow_jobs,
        cimodel.data.simple.bazel_definitions.get_workflow_jobs,
        cimodel.data.simple.binary_smoketest.get_workflow_jobs,
        cimodel.data.simple.nightly_ios.get_workflow_jobs,
        cimodel.data.simple.nightly_android.get_workflow_jobs,
        cimodel.data.simple.anaconda_prune_defintions.get_workflow_jobs,
        windows_build_definitions.get_windows_workflows,
        binary_build_definitions.get_post_upload_jobs,
        binary_build_definitions.get_binary_smoke_test_jobs,
    ]
    build_jobs = [f() for f in build_workflows_functions]
    master_build_jobs = filter_master_only_jobs(build_jobs)

    binary_build_functions = [
        binary_build_definitions.get_binary_build_jobs,
        binary_build_definitions.get_nightly_tests,
        binary_build_definitions.get_nightly_uploads,
    ]

    slow_gradcheck_jobs = pytorch_build_definitions.get_workflow_jobs(only_slow_gradcheck=True)

    return {
        "workflows": {
            "binary_builds": {
                "when": r"<< pipeline.parameters.run_binary_tests >>",
                "jobs": [f() for f in binary_build_functions],
            },
            "build": {
                "when": r"<< pipeline.parameters.run_build >>",
                "jobs": build_jobs,
            },
            "master_build": {
                "when": r"<< pipeline.parameters.run_master_build >>",
                "jobs": master_build_jobs,
            },
            "slow_gradcheck_build": {
                "when": r"<< pipeline.parameters.run_slow_gradcheck_build >>",
                "jobs": slow_gradcheck_jobs,
            },
        }
    }


# Order of this list matters to the generated config.yml.
YAML_SOURCES = [
@ -184,22 +77,35 @@ YAML_SOURCES = [
    File("commands.yml"),
    File("nightly-binary-build-defaults.yml"),
    Header("Build parameters"),
    File("build-parameters/pytorch-build-params.yml"),
    File("build-parameters/binary-build-params.yml"),
    File("build-parameters/promote-build-params.yml"),
    File("pytorch-build-params.yml"),
    File("caffe2-build-params.yml"),
    File("binary-build-params.yml"),
    Header("Job specs"),
    File("job-specs/pytorch-job-specs.yml"),
    File("job-specs/binary-job-specs.yml"),
    File("job-specs/job-specs-custom.yml"),
    File("job-specs/job-specs-promote.yml"),
    File("job-specs/binary_update_htmls.yml"),
    File("job-specs/binary-build-tests.yml"),
    File("job-specs/docker_jobs.yml"),
    Header("Workflows"),
    Treegen(gen_build_workflows_tree, 0),
    File("workflows/workflows-scheduled-ci.yml"),
    File("workflows/workflows-ecr-gc.yml"),
    File("workflows/workflows-promote.yml"),
    File("pytorch-job-specs.yml"),
    File("caffe2-job-specs.yml"),
    File("binary-job-specs.yml"),
    File("job-specs-setup.yml"),
    File("job-specs-custom.yml"),
    File("binary_update_htmls.yml"),
    File("binary-build-tests.yml"),
    File("workflows.yml"),
    Listgen(pytorch_build_definitions.get_workflow_jobs, 3),
    File("workflows-pytorch-macos-builds.yml"),
    File("workflows-pytorch-android-gradle-build.yml"),
    File("workflows-pytorch-ios-builds.yml"),
    Listgen(caffe2_build_definitions.get_workflow_jobs, 3),
    File("workflows-binary-builds-smoke-subset.yml"),
    Header("Daily smoke test trigger"),
    Treegen(binary_build_definitions.add_binary_smoke_test_jobs, 1),
    Header("Daily binary build trigger"),
    Treegen(binary_build_definitions.add_binary_build_jobs, 1),
    File("workflows-nightly-ios-binary-builds.yml"),
    File("workflows-nightly-android-binary-builds.yml"),
    Header("Nightly tests"),
    Listgen(binary_build_definitions.get_nightly_tests, 3),
    File("workflows-nightly-uploads-header.yml"),
    Listgen(binary_build_definitions.get_nightly_uploads, 3),
    File("workflows-s3-html.yml"),
]
@ -1,5 +0,0 @@
cd $PSScriptRoot;
$NewFile = New-TemporaryFile;
python generate_config_yml.py > $NewFile.name
(Get-Content $NewFile.name -Raw).TrimEnd().Replace("`r`n","`n") | Set-Content config.yml -Force
Remove-Item $NewFile.name
@ -1,17 +1,8 @@
#!/bin/bash -e
#!/bin/bash -xe

# Allows this script to be invoked from any directory:
cd "$(dirname "$0")"

UNCOMMIT_CHANGE=$(git status -s | grep " config.yml" | wc -l | xargs)
if [[ $UNCOMMIT_CHANGE != 0 ]]; then
  OLD_FILE=$(mktemp)
  cp config.yml "$OLD_FILE"
  echo "Uncommitted change detected in .circleci/config.yml"
  echo "It has been backed up to $OLD_FILE"
fi
cd $(dirname "$0")

NEW_FILE=$(mktemp)
./generate_config_yml.py > "$NEW_FILE"
cp "$NEW_FILE" config.yml
echo "New config generated in .circleci/config.yml"
./generate_config_yml.py > $NEW_FILE
cp $NEW_FILE config.yml
@ -1,20 +1,9 @@
#!/bin/bash
set -eux -o pipefail

retry () {
    $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}
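# Illustrative usage: "retry git clone <url>" makes up to five attempts,
# backing off 1, 2, 4, and 8 seconds between retries.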

# This step runs on multiple executors with different envfile locations
if [[ "$(uname)" == Darwin ]]; then
  # macos executor (builds and tests)
  workdir="/Users/distiller/project"
elif [[ "$OSTYPE" == "msys" ]]; then
  # windows executor (builds and tests)
  rm -rf /c/w
  ln -s "/c/Users/circleci/project" /c/w
  workdir="/c/w"
elif [[ -d "/home/circleci/project" ]]; then
  # machine executor (binary tests)
  workdir="/home/circleci/project"
@ -24,22 +13,11 @@ else
fi

# It is very important that this stays in sync with binary_populate_env.sh
if [[ "$OSTYPE" == "msys" ]]; then
  # We need to make the paths as short as possible on Windows
  export PYTORCH_ROOT="$workdir/p"
  export BUILDER_ROOT="$workdir/b"
else
  export PYTORCH_ROOT="$workdir/pytorch"
  export BUILDER_ROOT="$workdir/builder"
fi

# Try to extract PR number from branch if not already set
if [[ -z "${CIRCLE_PR_NUMBER:-}" ]]; then
  CIRCLE_PR_NUMBER="$(echo ${CIRCLE_BRANCH} | sed -E -n 's/pull\/([0-9]*).*/\1/p')"
fi
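# e.g. (illustrative) CIRCLE_BRANCH="pull/12345" yields CIRCLE_PR_NUMBER="12345";
# non-PR branches leave it empty because sed -n prints nothing without a match.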
export PYTORCH_ROOT="$workdir/pytorch"
export BUILDER_ROOT="$workdir/builder"

# Clone the Pytorch branch
retry git clone https://github.com/pytorch/pytorch.git "$PYTORCH_ROOT"
git clone https://github.com/pytorch/pytorch.git "$PYTORCH_ROOT"
pushd "$PYTORCH_ROOT"
if [[ -n "${CIRCLE_PR_NUMBER:-}" ]]; then
  # "smoke" binary build on PRs
|
  echo "Can't tell what to checkout"
  exit 1
fi
retry git submodule update --init --recursive
git submodule update --init --recursive --quiet
echo "Using Pytorch from "
git --no-pager log --max-count 1
popd

# Clone the Builder master repo
retry git clone -q https://github.com/pytorch/builder.git "$BUILDER_ROOT"
git clone -q https://github.com/pytorch/builder.git "$BUILDER_ROOT"
pushd "$BUILDER_ROOT"
echo "Using builder from "
git --no-pager log --max-count 1
@ -31,9 +31,9 @@ fi

conda_sh="$workdir/install_miniconda.sh"
if [[ "$(uname)" == Darwin ]]; then
  curl --retry 3 -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
  retry curl -o "$conda_sh" https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
else
  curl --retry 3 -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
  retry curl -o "$conda_sh" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
fi
chmod +x "$conda_sh"
"$conda_sh" -b -p "$MINICONDA_ROOT"
@ -1,29 +1,24 @@
#!/bin/bash
set -ex -o pipefail
set -eux -o pipefail

echo ""
echo "DIR: $(pwd)"
echo "PWD: ${PWD}"
WORKSPACE=/Users/distiller/workspace
PROJ_ROOT=/Users/distiller/project
export TCLLIBPATH="/usr/local/lib"

export TCLLIBPATH="/usr/local/lib"
# Install conda
curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x ~/conda.sh
/bin/bash ~/conda.sh -b -p ~/anaconda
curl -o ~/Downloads/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x ~/Downloads/conda.sh
/bin/bash ~/Downloads/conda.sh -b -p ~/anaconda
export PATH="~/anaconda/bin:${PATH}"
source ~/anaconda/bin/activate

# Install dependencies
conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests typing_extensions --yes
conda install -c conda-forge valgrind --yes
conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing requests --yes
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}

# sync submodules
cd ${PROJ_ROOT}
git submodule sync
git submodule update --init --recursive

# run build script
chmod a+x ${PROJ_ROOT}/scripts/build_ios.sh
echo "########################################################"
@ -31,13 +26,13 @@ cat ${PROJ_ROOT}/scripts/build_ios.sh
echo "########################################################"
echo "IOS_ARCH: ${IOS_ARCH}"
echo "IOS_PLATFORM: ${IOS_PLATFORM}"
export BUILD_PYTORCH_MOBILE=1
export IOS_ARCH=${IOS_ARCH}
export IOS_PLATFORM=${IOS_PLATFORM}
unbuffer ${PROJ_ROOT}/scripts/build_ios.sh 2>&1 | ts

# store the binary
cd ${WORKSPACE}
DEST_DIR=${WORKSPACE}/ios
mkdir -p ${DEST_DIR}
cp -R ${PROJ_ROOT}/build_ios/install ${DEST_DIR}
mv ${DEST_DIR}/install ${DEST_DIR}/${IOS_ARCH}
@ -1,29 +0,0 @@
#!/bin/bash
set -ex -o pipefail

echo ""
echo "DIR: $(pwd)"
PROJ_ROOT=/Users/distiller/project
cd ${PROJ_ROOT}/ios/TestApp
# install fastlane
sudo gem install bundler && bundle install
# install certificates
echo "${IOS_CERT_KEY}" >> cert.txt
base64 --decode cert.txt -o Certificates.p12
rm cert.txt
bundle exec fastlane install_cert
# install the provisioning profile
PROFILE=PyTorch_CI_2021.mobileprovision
PROVISIONING_PROFILES=~/Library/MobileDevice/Provisioning\ Profiles
mkdir -pv "${PROVISIONING_PROFILES}"
cd "${PROVISIONING_PROFILES}"
echo "${IOS_SIGN_KEY}" >> cert.txt
base64 --decode cert.txt -o ${PROFILE}
rm cert.txt
# run the ruby build script
if ! [ -x "$(command -v xcodebuild)" ]; then
  echo 'Error: xcodebuild is not installed.'
  exit 1
fi
PROFILE=PyTorch_CI_2021
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM} -c ${PROFILE} -t ${IOS_DEV_TEAM_ID}
@ -1,8 +1,8 @@
#!/bin/bash
set -ex -o pipefail
set -eux -o pipefail

echo ""
echo "DIR: $(pwd)"
echo "PWD: $(pwd)"
WORKSPACE=/Users/distiller/workspace
PROJ_ROOT=/Users/distiller/project
ARTIFACTS_DIR=${WORKSPACE}/ios
@ -14,17 +14,17 @@ mkdir -p ${ZIP_DIR}/src
cp -R ${ARTIFACTS_DIR}/arm64/include ${ZIP_DIR}/install/
# build a FAT binary
cd ${ZIP_DIR}/install/lib
target_libs=(libc10.a libclog.a libcpuinfo.a libeigen_blas.a libpthreadpool.a libpytorch_qnnpack.a libtorch_cpu.a libtorch.a libXNNPACK.a)
target_libs=(libc10.a libclog.a libcpuinfo.a libeigen_blas.a libpytorch_qnnpack.a libtorch.a)
for lib in ${target_libs[*]}
do
  if [ -f "${ARTIFACTS_DIR}/x86_64/lib/${lib}" ] && [ -f "${ARTIFACTS_DIR}/arm64/lib/${lib}" ]; then
    libs=("${ARTIFACTS_DIR}/x86_64/lib/${lib}" "${ARTIFACTS_DIR}/arm64/lib/${lib}")
    lipo -create "${libs[@]}" -o ${ZIP_DIR}/install/lib/${lib}
  fi
  libs=(${ARTIFACTS_DIR}/x86_64/lib/${lib} ${ARTIFACTS_DIR}/arm64/lib/${lib})
  lipo -create "${libs[@]}" -o ${ZIP_DIR}/install/lib/${lib}
done
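# (illustrative) lipo -create merges the x86_64 (simulator) and arm64 (device)
# archives into one fat static library; "lipo -i" below prints each archive's
# architectures as a sanity check.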
# for nnpack, we only support arm64 build
cp ${ARTIFACTS_DIR}/arm64/lib/libnnpack.a ./
lipo -i ${ZIP_DIR}/install/lib/*.a
# copy the umbrella header and license
cp ${PROJ_ROOT}/ios/LibTorch-Lite.h ${ZIP_DIR}/src/
cp ${PROJ_ROOT}/ios/LibTorch.h ${ZIP_DIR}/src/
cp ${PROJ_ROOT}/LICENSE ${ZIP_DIR}/
# zip the library
ZIPFILE=libtorch_ios_nightly_build.zip
@ -34,13 +34,7 @@ touch version.txt
echo $(date +%s) > version.txt
zip -r ${ZIPFILE} install src version.txt LICENSE
# upload to aws
# Install conda then 'conda install' awscli
curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x ~/conda.sh
/bin/bash ~/conda.sh -b -p ~/anaconda
export PATH="~/anaconda/bin:${PATH}"
source ~/anaconda/bin/activate
conda install -c conda-forge awscli --yes
brew install awscli
set +x
export AWS_ACCESS_KEY_ID=${AWS_S3_ACCESS_KEY_FOR_PYTORCH_BINARY_UPLOAD}
export AWS_SECRET_ACCESS_KEY=${AWS_S3_ACCESS_SECRET_FOR_PYTORCH_BINARY_UPLOAD}
@ -4,31 +4,27 @@ echo "RUNNING ON $(uname -a) WITH $(nproc) CPUS AND $(free -m)"
set -eux -o pipefail
source /env

# Because most Circle executors only have 20 CPUs, using more causes OOMs w/ Ninja and nvcc parallelization
MEMORY_LIMIT_MAX_JOBS=18
NUM_CPUS=$(( $(nproc) - 2 ))

# Defaults here for **binary** linux builds so they can be changed in one place
export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}
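# (illustrative) On a 20-CPU executor NUM_CPUS=18, so MAX_JOBS=min(18, 18)=18;
# on a 36-CPU machine the memory cap still keeps MAX_JOBS at 18.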

if [[ "${DESIRED_CUDA}" == "cu111" || "${DESIRED_CUDA}" == "cu113" ]]; then
  export BUILD_SPLIT_CUDA="ON"
fi
# Defaults here so they can be changed in one place
export MAX_JOBS=12

# Parse the parameters
if [[ "$PACKAGE_TYPE" == 'conda' ]]; then
  build_script='conda/build_pytorch.sh'
elif [[ "$DESIRED_CUDA" == cpu ]]; then
  build_script='manywheel/build_cpu.sh'
elif [[ "$DESIRED_CUDA" == *"rocm"* ]]; then
  build_script='manywheel/build_rocm.sh'
else
  build_script='manywheel/build.sh'
fi

if [[ "$CIRCLE_BRANCH" == "master" ]] || [[ "$CIRCLE_BRANCH" == release/* ]]; then
  export BUILD_DEBUG_INFO=1
# We want to call unbuffer, which calls tclsh which finds the expect
# package. The expect was installed by yum into /usr/bin so we want to
# find /usr/bin/tclsh, but this is shadowed by /opt/conda/bin/tclsh in
# the conda docker images, so we prepend it to the path here.
if [[ "$PACKAGE_TYPE" == 'conda' ]]; then
  mkdir /just_tclsh_bin
  ln -s /usr/bin/tclsh /just_tclsh_bin/tclsh
  export PATH=/just_tclsh_bin:$PATH
fi

# Build the package
SKIP_ALL_TESTS=1 "/builder/$build_script"
SKIP_ALL_TESTS=1 unbuffer "/builder/$build_script" | ts
@ -5,43 +5,17 @@ cat >/home/circleci/project/ci_test_script.sh <<EOL
# =================== The following code will be executed inside Docker container ===================
set -eux -o pipefail

python_nodot="\$(echo $DESIRED_PYTHON | tr -d m.u)"

# Set up Python
if [[ "$PACKAGE_TYPE" == conda ]]; then
  # There was a bug that was introduced in conda-package-handling >= 1.6.1 that makes archives
  # above a certain size fail out when attempting to extract
  # see: https://github.com/conda/conda-package-handling/issues/71
  conda install -y conda-package-handling=1.6.0
  retry conda create -qyn testenv python="$DESIRED_PYTHON"
  source activate testenv >/dev/null
elif [[ "$DESIRED_PYTHON" == 2.7mu ]]; then
  export PATH="/opt/python/cp27-cp27mu/bin:\$PATH"
elif [[ "$PACKAGE_TYPE" != libtorch ]]; then
  python_path="/opt/python/cp\$python_nodot-cp\${python_nodot}"
  # Prior to Python 3.8 paths were suffixed with an 'm'
  if [[ -d "\${python_path}/bin" ]]; then
    export PATH="\${python_path}/bin:\$PATH"
  elif [[ -d "\${python_path}m/bin" ]]; then
    export PATH="\${python_path}m/bin:\$PATH"
  fi
  python_nodot="\$(echo $DESIRED_PYTHON | tr -d m.u)"
  export PATH="/opt/python/cp\$python_nodot-cp\${python_nodot}m/bin:\$PATH"
fi

EXTRA_CONDA_FLAGS=""
NUMPY_PIN=""
if [[ "\$python_nodot" = *39* ]]; then
  EXTRA_CONDA_FLAGS="-c=conda-forge"
  # There's an issue with conda channel priority where it'll randomly pick 1.19 over 1.20
  # we set a lower boundary here just to be safe
  NUMPY_PIN=">=1.20"
fi

if [[ "$DESIRED_CUDA" == "cu112" ]]; then
  EXTRA_CONDA_FLAGS="-c=conda-forge"
fi

# Move debug wheels out of the package dir so they don't get installed
mkdir -p /tmp/debug_final_pkgs
mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to move"

# Install the package
# These network calls should not have 'retry's because they are installing
# locally and aren't actually network calls
@ -50,37 +24,23 @@ mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to m
# conda build scripts themselves. These should really be consolidated
pkg="/final_pkgs/\$(ls /final_pkgs)"
if [[ "$PACKAGE_TYPE" == conda ]]; then
  (
    # For some reason conda likes to re-activate the conda environment when attempting this install
    # which means that a deactivate is run and some variables might not exist when that happens,
    # namely CONDA_MKL_INTERFACE_LAYER_BACKUP from libblas so let's just ignore unbound variables when
    # it comes to the conda installation commands
    set +u
    retry conda install \${EXTRA_CONDA_FLAGS} -yq \
        "numpy\${NUMPY_PIN}" \
        future \
        mkl>=2018 \
        ninja \
        dataclasses \
        typing-extensions \
        defaults::protobuf \
        six
    if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
      retry conda install -c pytorch -y cpuonly
  conda install -y "\$pkg" --offline
  if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
    conda install -y cpuonly -c pytorch
  fi
  retry conda install -yq future numpy protobuf six
  if [[ "$DESIRED_CUDA" != 'cpu' ]]; then
    # DESIRED_CUDA is in format cu90 or cu100
    if [[ "${#DESIRED_CUDA}" == 4 ]]; then
      cu_ver="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3}"
    else
      # DESIRED_CUDA is in format cu90 or cu102
      if [[ "${#DESIRED_CUDA}" == 4 ]]; then
        cu_ver="${DESIRED_CUDA:2:1}.${DESIRED_CUDA:3}"
      else
        cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
      fi
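      # e.g. (illustrative) "cu92" is 4 chars -> cu_ver="9.2";
      # "cu102" takes the else branch -> cu_ver="10.2"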
      retry conda install \${EXTRA_CONDA_FLAGS} -yq -c nvidia -c pytorch "cudatoolkit=\${cu_ver}"
      cu_ver="${DESIRED_CUDA:2:2}.${DESIRED_CUDA:4}"
    fi
    conda install \${EXTRA_CONDA_FLAGS} -y "\$pkg" --offline
  )
  retry conda install -yq -c pytorch "cudatoolkit=\${cu_ver}"
fi
elif [[ "$PACKAGE_TYPE" != libtorch ]]; then
  pip install "\$pkg"
  retry pip install -q future numpy protobuf typing-extensions six
  retry pip install -q future numpy protobuf six
fi
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
  pkg="\$(ls /final_pkgs/*-latest.zip)"
@ -90,7 +50,6 @@ fi

# Test the package
/builder/check_binary.sh

# =================== The above code will be executed inside Docker container ===================
EOL
echo
.circleci/scripts/binary_linux_upload.sh (new executable file, 40 lines)
@ -0,0 +1,40 @@
#!/bin/bash
# Do NOT set -x
source /home/circleci/project/env
set -eu -o pipefail
set +x
declare -x "AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
declare -x "AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
cat >/home/circleci/project/login_to_anaconda.sh <<EOL
set +x
echo "Trying to login to Anaconda"
yes | anaconda login \
    --username "$PYTORCH_BINARY_PJH5_CONDA_USERNAME" \
    --password "$PYTORCH_BINARY_PJH5_CONDA_PASSWORD"
set -x
EOL
chmod +x /home/circleci/project/login_to_anaconda.sh

#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
# DO NOT TURN -x ON BEFORE THIS LINE
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
set -eux -o pipefail
export PATH="$MINICONDA_ROOT/bin:$PATH"

# Upload the package to the final location
pushd /home/circleci/project/final_pkgs
if [[ "$PACKAGE_TYPE" == conda ]]; then
  retry conda install -yq anaconda-client
  retry timeout 30 /home/circleci/project/login_to_anaconda.sh
  anaconda upload "$(ls)" -u pytorch-nightly --label main --no-progress --force
elif [[ "$PACKAGE_TYPE" == libtorch ]]; then
  retry pip install -q awscli
  s3_dir="s3://pytorch/libtorch/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
  for pkg in $(ls); do
    retry aws s3 cp "$pkg" "$s3_dir" --acl public-read
  done
else
  retry pip install -q awscli
  s3_dir="s3://pytorch/whl/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
  retry aws s3 cp "$(ls)" "$s3_dir" --acl public-read
fi
@ -5,30 +5,26 @@ source "/Users/distiller/project/env"
export "PATH=$workdir/miniconda/bin:$PATH"
pkg="$workdir/final_pkgs/$(ls $workdir/final_pkgs)"

# Don't test libtorch
# TODO we should test libtorch
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
  exit 0
fi

# Create a new test env
# TODO cut all this out into a separate test job and have an entirely different
# miniconda
if [[ "$PACKAGE_TYPE" != libtorch ]]; then
  source deactivate || true
  conda create -qyn test python="$DESIRED_PYTHON"
  source activate test >/dev/null
fi
source deactivate || true
conda create -qyn test python="$DESIRED_PYTHON"
source activate test >/dev/null

# Install the package
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
  pkg="$(ls $workdir/final_pkgs/*-latest.zip)"
  unzip "$pkg" -d /tmp
  cd /tmp/libtorch
elif [[ "$PACKAGE_TYPE" == conda ]]; then
  conda install -y "$pkg"
if [[ "$PACKAGE_TYPE" == conda ]]; then
  conda install -y "$pkg" --offline
else
  pip install "$pkg" -v
  pip install "$pkg" --no-index --no-dependencies -v
fi

# Test
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
  $workdir/builder/check_binary.sh
else
  pushd "$workdir/pytorch"
  $workdir/builder/run_tests.sh "$PACKAGE_TYPE" "$DESIRED_PYTHON" "$DESIRED_CUDA"
fi
pushd "$workdir/pytorch"
$workdir/builder/run_tests.sh "$PACKAGE_TYPE" "$DESIRED_PYTHON" "$DESIRED_CUDA"
.circleci/scripts/binary_macos_upload.sh (new executable file, 40 lines)
@ -0,0 +1,40 @@
#!/bin/bash
# Do NOT set -x
set -eu -o pipefail
set +x
export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
cat >/Users/distiller/project/login_to_anaconda.sh <<EOL
set +x
echo "Trying to login to Anaconda"
yes | anaconda login \
    --username "$PYTORCH_BINARY_PJH5_CONDA_USERNAME" \
    --password "$PYTORCH_BINARY_PJH5_CONDA_PASSWORD"
set -x
EOL
chmod +x /Users/distiller/project/login_to_anaconda.sh

#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
# DO NOT TURN -x ON BEFORE THIS LINE
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!
set -eux -o pipefail

source "/Users/distiller/project/env"
export "PATH=$workdir/miniconda/bin:$PATH"

pushd "$workdir/final_pkgs"
if [[ "$PACKAGE_TYPE" == conda ]]; then
  retry conda install -yq anaconda-client
  retry /Users/distiller/project/login_to_anaconda.sh
  retry anaconda upload "$(ls)" -u pytorch-nightly --label main --no-progress --force
elif [[ "$PACKAGE_TYPE" == libtorch ]]; then
  retry pip install -q awscli
  s3_dir="s3://pytorch/libtorch/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
  for pkg in $(ls); do
    retry aws s3 cp "$pkg" "$s3_dir" --acl public-read
  done
else
  retry pip install -q awscli
  s3_dir="s3://pytorch/whl/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
  retry aws s3 cp "$(ls)" "$s3_dir" --acl public-read
fi
@ -2,31 +2,11 @@
set -eux -o pipefail
export TZ=UTC

tagged_version() {
  # Grabs version from either the env variable CIRCLE_TAG
  # or the pytorch git described version
  if [[ "$OSTYPE" == "msys" ]]; then
    GIT_DESCRIBE="git --git-dir ${workdir}/p/.git describe"
  else
    GIT_DESCRIBE="git --git-dir ${workdir}/pytorch/.git describe"
  fi
  if [[ -n "${CIRCLE_TAG:-}" ]]; then
    echo "${CIRCLE_TAG}"
  elif ${GIT_DESCRIBE} --exact --tags >/dev/null; then
    ${GIT_DESCRIBE} --tags
  else
    return 1
  fi
}

# We need to write an envfile to persist these variables to following
# steps, but the location of the envfile depends on the circleci executor
if [[ "$(uname)" == Darwin ]]; then
  # macos executor (builds and tests)
  workdir="/Users/distiller/project"
elif [[ "$OSTYPE" == "msys" ]]; then
  # windows executor (builds and tests)
  workdir="/c/w"
elif [[ -d "/home/circleci/project" ]]; then
  # machine executor (binary tests)
  workdir="/home/circleci/project"
@ -43,15 +23,7 @@ configs=($BUILD_ENVIRONMENT)
export PACKAGE_TYPE="${configs[0]}"
export DESIRED_PYTHON="${configs[1]}"
export DESIRED_CUDA="${configs[2]}"
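# e.g. (hypothetical) BUILD_ENVIRONMENT="manywheel 3.7m cu102 devtoolset7" splits into
# PACKAGE_TYPE=manywheel, DESIRED_PYTHON=3.7m, DESIRED_CUDA=cu102, configs[3]=devtoolset7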
if [[ "${BUILD_FOR_SYSTEM:-}" == "windows" ]]; then
  export DESIRED_DEVTOOLSET=""
  export LIBTORCH_CONFIG="${configs[3]:-}"
  if [[ "$LIBTORCH_CONFIG" == 'debug' ]]; then
    export DEBUG=1
  fi
else
  export DESIRED_DEVTOOLSET="${configs[3]:-}"
fi
export DESIRED_DEVTOOLSET="${configs[3]:-}"
if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
  export BUILD_PYTHONLESS=1
fi
@ -60,77 +32,36 @@ fi
export DOCKER_IMAGE=${DOCKER_IMAGE:-}
if [[ -z "$DOCKER_IMAGE" ]]; then
  if [[ "$PACKAGE_TYPE" == conda ]]; then
    export DOCKER_IMAGE="pytorch/conda-cuda"
    export DOCKER_IMAGE="soumith/conda-cuda"
  elif [[ "$DESIRED_CUDA" == cpu ]]; then
    export DOCKER_IMAGE="pytorch/manylinux-cuda100"
    export DOCKER_IMAGE="soumith/manylinux-cuda100"
  else
    export DOCKER_IMAGE="pytorch/manylinux-cuda${DESIRED_CUDA:2}"
    export DOCKER_IMAGE="soumith/manylinux-cuda${DESIRED_CUDA:2}"
  fi
fi

USE_GOLD_LINKER="OFF"
# GOLD linker can not be used if CUPTI is statically linked into PyTorch, see https://github.com/pytorch/pytorch/issues/57744
if [[ ${DESIRED_CUDA} == "cpu" ]]; then
  USE_GOLD_LINKER="ON"
# Upload to parallel folder for devtoolsets
# All nightlies used to be devtoolset3, then devtoolset7 was added as a build
# option, so the upload was redirected to nightly/devtoolset7 to avoid
# conflicts with other binaries (there shouldn't be any conflicts). Now we are
# making devtoolset7 the default.
if [[ "$DESIRED_DEVTOOLSET" == 'devtoolset7' || "$DESIRED_DEVTOOLSET" == *"cxx11-abi"* || "$(uname)" == 'Darwin' ]]; then
  export PIP_UPLOAD_FOLDER='nightly/'
else
  # On linux machines, this shouldn't actually be called anymore. This is just
  # here for extra safety.
  export PIP_UPLOAD_FOLDER='nightly/devtoolset3/'
fi

USE_WHOLE_CUDNN="OFF"
# Link whole cuDNN for CUDA-11.1 to include fp16 fast kernels
if [[ "$(uname)" == "Linux" && "${DESIRED_CUDA}" == "cu111" ]]; then
  USE_WHOLE_CUDNN="ON"
fi

# Default to nightly, since that's where this normally uploads to
PIP_UPLOAD_FOLDER='nightly/'
# We put this here so that OVERRIDE_PACKAGE_VERSION below can read from it
export DATE="$(date -u +%Y%m%d)"
#TODO: We should be pulling semver version from the base version.txt
BASE_BUILD_VERSION="1.10.0.dev$DATE"
# Change BASE_BUILD_VERSION to git tag when on a git tag
# Use 'git -C' to make doubly sure we're in the correct directory for checking
# the git tag
if tagged_version >/dev/null; then
  # Switch upload folder to 'test/' if we are on a tag
  PIP_UPLOAD_FOLDER='test/'
  # Grab git tag, remove prefixed v and remove everything after -
  # Used to clean up tags that are for release candidates like v1.6.0-rc1
  # Turns tag v1.6.0-rc1 -> v1.6.0
  BASE_BUILD_VERSION="$(tagged_version | sed -e 's/^v//' -e 's/-.*$//')"
fi
if [[ "$(uname)" == 'Darwin' ]] || [[ "$PACKAGE_TYPE" == conda ]]; then
  export PYTORCH_BUILD_VERSION="${BASE_BUILD_VERSION}"
if [[ "$(uname)" == 'Darwin' ]] || [[ "$DESIRED_CUDA" == "cu100" ]] || [[ "$PACKAGE_TYPE" == conda ]]; then
  export PYTORCH_BUILD_VERSION="1.3.0.dev$DATE"
else
  export PYTORCH_BUILD_VERSION="${BASE_BUILD_VERSION}+$DESIRED_CUDA"
  export PYTORCH_BUILD_VERSION="1.3.0.dev$DATE+$DESIRED_CUDA"
fi
export PYTORCH_BUILD_NUMBER=1


JAVA_HOME=
BUILD_JNI=OFF
if [[ "$PACKAGE_TYPE" == libtorch ]]; then
  POSSIBLE_JAVA_HOMES=()
  POSSIBLE_JAVA_HOMES+=(/usr/local)
  POSSIBLE_JAVA_HOMES+=(/usr/lib/jvm/java-8-openjdk-amd64)
  POSSIBLE_JAVA_HOMES+=(/Library/Java/JavaVirtualMachines/*.jdk/Contents/Home)
  # Add the Windows-specific JNI path
  POSSIBLE_JAVA_HOMES+=("$PWD/.circleci/windows-jni/")
  for JH in "${POSSIBLE_JAVA_HOMES[@]}" ; do
    if [[ -e "$JH/include/jni.h" ]] ; then
      # Skip if we're not on Windows but haven't found a JAVA_HOME
      if [[ "$JH" == "$PWD/.circleci/windows-jni/" && "$OSTYPE" != "msys" ]] ; then
        break
      fi
      echo "Found jni.h under $JH"
      JAVA_HOME="$JH"
      BUILD_JNI=ON
      break
    fi
  done
  if [ -z "$JAVA_HOME" ]; then
    echo "Did not find jni.h"
  fi
fi

cat >>"$envfile" <<EOL
# =================== The following code will be executed inside Docker container ===================
export TZ=UTC
@ -142,13 +73,9 @@ export DESIRED_CUDA="$DESIRED_CUDA"
export LIBTORCH_VARIANT="${LIBTORCH_VARIANT:-}"
export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}"
export DESIRED_DEVTOOLSET="$DESIRED_DEVTOOLSET"
if [[ "${BUILD_FOR_SYSTEM:-}" == "windows" ]]; then
  export LIBTORCH_CONFIG="${LIBTORCH_CONFIG:-}"
  export DEBUG="${DEBUG:-}"
fi

export DATE="$DATE"
export NIGHTLIES_DATE_PREAMBLE=1.10.0.dev
export NIGHTLIES_DATE_PREAMBLE=1.3.0.dev
export PYTORCH_BUILD_VERSION="$PYTORCH_BUILD_VERSION"
export PYTORCH_BUILD_NUMBER="$PYTORCH_BUILD_NUMBER"
export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
@ -158,20 +85,13 @@ export TORCH_PACKAGE_NAME='torch'
export TORCH_CONDA_BUILD_FOLDER='pytorch-nightly'

export USE_FBGEMM=1
export JAVA_HOME=$JAVA_HOME
export BUILD_JNI=$BUILD_JNI
export PIP_UPLOAD_FOLDER="$PIP_UPLOAD_FOLDER"
export DOCKER_IMAGE="$DOCKER_IMAGE"

export workdir="$workdir"
export MAC_PACKAGE_WORK_DIR="$workdir"
if [[ "$OSTYPE" == "msys" ]]; then
  export PYTORCH_ROOT="$workdir/p"
  export BUILDER_ROOT="$workdir/b"
else
  export PYTORCH_ROOT="$workdir/pytorch"
  export BUILDER_ROOT="$workdir/builder"
fi
export PYTORCH_ROOT="$workdir/pytorch"
export BUILDER_ROOT="$workdir/builder"
export MINICONDA_ROOT="$workdir/miniconda"
export PYTORCH_FINAL_PACKAGE_DIR="$workdir/final_pkgs"
@ -179,11 +99,6 @@ export CIRCLE_TAG="${CIRCLE_TAG:-}"
|
||||
export CIRCLE_SHA1="$CIRCLE_SHA1"
|
||||
export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}"
|
||||
export CIRCLE_BRANCH="$CIRCLE_BRANCH"
|
||||
export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
|
||||
|
||||
export USE_GOLD_LINKER="${USE_GOLD_LINKER}"
|
||||
export USE_GLOO_WITH_OPENSSL="ON"
|
||||
export USE_WHOLE_CUDNN="${USE_WHOLE_CUDNN}"
|
||||
# =================== The above code will be executed inside Docker container ===================
|
||||
EOL
|
||||
|
||||
|
||||
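Everything between the `EOL` markers is written into `$envfile`, which later jobs read back in. A minimal sketch of that hand-off; the exact path a consumer sources varies per platform, and the one below is an assumption (the Windows script further down really does `source "/c/w/env"`):

# Hypothetical consumer of the generated envfile:
set -eux -o pipefail
source /home/circleci/project/env   # assumed location on the Linux side
echo "building ${TORCH_PACKAGE_NAME} ${PYTORCH_BUILD_VERSION}"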
@@ -16,12 +16,31 @@ set -eux -o pipefail
# Expect actual code to be written to this file
chmod +x /home/circleci/project/ci_test_script.sh

-VOLUME_MOUNTS="-v /home/circleci/project/:/circleci_stuff -v /home/circleci/project/final_pkgs:/final_pkgs -v ${PYTORCH_ROOT}:/pytorch -v ${BUILDER_ROOT}:/builder"
# Run the docker
if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
-  export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --gpus all ${VOLUME_MOUNTS} -t -d "${DOCKER_IMAGE}")
+  export id=$(docker run --runtime=nvidia -t -d "${DOCKER_IMAGE}")
else
-  export id=$(docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined ${VOLUME_MOUNTS} -t -d "${DOCKER_IMAGE}")
+  export id=$(docker run -t -d "${DOCKER_IMAGE}")
fi
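Neither side of the diff verifies the launch; a fail-fast check one could add here (a hypothetical addition, not part of either version):

# Hypothetical: abort early if the container is not actually running.
if [[ "$(docker inspect -f '{{.State.Running}}' "$id")" != "true" ]]; then
  echo "container $id failed to start"
  exit 1
fi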
+
+# Copy the envfile and script with all the code to run into the docker.
+docker cp /home/circleci/project/. "$id:/circleci_stuff"
+
+# Copy built packages into the docker to test. This should only exist on the
+# binary test jobs. The package should've been created from a binary build job,
+# which persisted the package to a CircleCI workspace, which this job then
+# copies into a GPU enabled docker for testing
+if [[ -d "/home/circleci/project/final_pkgs" ]]; then
+  docker cp /home/circleci/project/final_pkgs "$id:/final_pkgs"
+fi
+
+# Copy the needed repos into the docker. These do not exist in the smoke test
+# jobs, since the smoke test jobs do not need the Pytorch source code.
+if [[ -d "$PYTORCH_ROOT" ]]; then
+  docker cp "$PYTORCH_ROOT" "$id:/pytorch"
+fi
+if [[ -d "$BUILDER_ROOT" ]]; then
+  docker cp "$BUILDER_ROOT" "$id:/builder"
+fi

# Execute the test script that was populated by an earlier section
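The execution step itself sits outside this hunk; a hedged sketch of what it plausibly looks like (the exec invocation below is an assumption):

# Hypothetical: run the populated test script inside the container.
docker exec -t "$id" bash /circleci_stuff/ci_test_script.sh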
@@ -1,98 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail

PACKAGE_TYPE=${PACKAGE_TYPE:-conda}

PKG_DIR=${PKG_DIR:-/tmp/workspace/final_pkgs}

# Designates whether to submit as a release candidate or a nightly build
# Value should be `test` when uploading release candidates
# currently set within `designate_upload_channel`
UPLOAD_CHANNEL=${UPLOAD_CHANNEL:-nightly}
# Designates what subfolder to put packages into
UPLOAD_SUBFOLDER=${UPLOAD_SUBFOLDER:-cpu}
UPLOAD_BUCKET="s3://pytorch"
BACKUP_BUCKET="s3://pytorch-backup"

DRY_RUN=${DRY_RUN:-enabled}
# Don't actually do work unless explicit
ANACONDA="true anaconda"
AWS_S3_CP="aws s3 cp --dryrun"
if [[ "${DRY_RUN}" = "disabled" ]]; then
  ANACONDA="anaconda"
  AWS_S3_CP="aws s3 cp"
fi
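Prefixing the command with `true` turns every `${ANACONDA} upload ...` call into a no-op, and `--dryrun` does the same for the S3 copies, so by default the script changes nothing. A real upload must be opted into explicitly, e.g. (the script name here is an assumption):

# Hypothetical invocation for a real upload:
DRY_RUN=disabled PACKAGE_TYPE=wheel UPLOAD_SUBFOLDER=cu111 ./binary_upload.sh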

do_backup() {
  local backup_dir
  backup_dir=$1
  (
    pushd /tmp/workspace
    set -x
    ${AWS_S3_CP} --recursive . "${BACKUP_BUCKET}/${CIRCLE_TAG}/${backup_dir}/"
  )
}

conda_upload() {
  (
    set -x
    ${ANACONDA} \
      upload \
      ${PKG_DIR}/*.tar.bz2 \
      -u "pytorch-${UPLOAD_CHANNEL}" \
      --label main \
      --no-progress \
      --force
  )
}

s3_upload() {
  local extension
  local pkg_type
  extension="$1"
  pkg_type="$2"
  s3_dir="${UPLOAD_BUCKET}/${pkg_type}/${UPLOAD_CHANNEL}/${UPLOAD_SUBFOLDER}/"
  (
    for pkg in ${PKG_DIR}/*.${extension}; do
      (
        set -x
        ${AWS_S3_CP} --no-progress --acl public-read "${pkg}" "${s3_dir}"
      )
    done
  )
}
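For a concrete sense of the resulting layout (illustrative values only):

# s3_upload "whl" "whl" with UPLOAD_CHANNEL=nightly and UPLOAD_SUBFOLDER=cu111
# copies every ${PKG_DIR}/*.whl to:
#   s3://pytorch/whl/nightly/cu111/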

case "${PACKAGE_TYPE}" in
  conda)
    conda_upload
    # Fetch platform (eg. win-64, linux-64, etc.) from index file
    # Because there's no actual conda command to read this
    subdir=$(\
      tar -xOf ${PKG_DIR}/*.bz2 info/index.json \
        | grep subdir \
        | cut -d ':' -f2 \
        | sed -e 's/[[:space:]]//' -e 's/"//g' -e 's/,//' \
    )
    BACKUP_DIR="conda/${subdir}"
    ;;
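Walking an assumed `info/index.json` line through that pipeline shows why each stage is there:

# '  "subdir": "linux-64",'  -> grep keeps the line containing "subdir"
# ' "linux-64",'             -> cut -d ':' -f2 takes everything after the colon
# 'linux-64'                 -> sed strips a space, the quotes, and the comma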
  libtorch)
    s3_upload "zip" "libtorch"
    BACKUP_DIR="libtorch/${UPLOAD_CHANNEL}/${UPLOAD_SUBFOLDER}"
    ;;
  # wheel can either refer to wheel/manywheel
  *wheel)
    s3_upload "whl" "whl"
    BACKUP_DIR="whl/${UPLOAD_CHANNEL}/${UPLOAD_SUBFOLDER}"
    ;;
  *)
    echo "ERROR: unknown package type: ${PACKAGE_TYPE}"
    exit 1
    ;;
esac

# CIRCLE_TAG is defined by upstream circleci,
# this can be changed to recognize tagged versions
if [[ -n "${CIRCLE_TAG:-}" ]]; then
  do_backup "${BACKUP_DIR}"
fi
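Tagged runs therefore also back everything up under the tag; for example (illustrative values):

# With CIRCLE_TAG=v1.3.0 and BACKUP_DIR=conda/linux-64, do_backup copies
# /tmp/workspace recursively to s3://pytorch-backup/v1.3.0/conda/linux-64/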
@@ -1,49 +0,0 @@
#!/bin/bash
set -eux -o pipefail

source "/c/w/env"
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"

export CUDA_VERSION="${DESIRED_CUDA/cu/}"
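`${DESIRED_CUDA/cu/}` is plain bash pattern substitution, dropping the first occurrence of `cu`:

# Illustration: DESIRED_CUDA=cu113 -> CUDA_VERSION=113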
export USE_SCCACHE=1
export SCCACHE_BUCKET=ossci-compiler-cache-windows
export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"

if [[ "$CUDA_VERSION" == "92" || "$CUDA_VERSION" == "100" ]]; then
  export VC_YEAR=2017
else
  export VC_YEAR=2019
fi

if [[ "${DESIRED_CUDA}" == "cu111" || "${DESIRED_CUDA}" == "cu113" ]]; then
  export BUILD_SPLIT_CUDA="ON"
fi

set +x
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}
set -x
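The `set +x` / `set -x` bracket matters under `set -eux`: it suspends xtrace while the keys are exported, so the secret values never reach the build log. The same pattern in isolation:

set +x                            # stop echoing commands
export MY_SECRET="hypothetical"   # value is not written to the log
set -x                            # resume xtrace afterwards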

if [[ "$CIRCLECI" == 'true' && -d "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances" ]]; then
  mv "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances" .
  rm -rf "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
  mkdir -p "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
  mv _Instances "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
fi

if [[ "$CIRCLECI" == 'true' && -d "C:\\Microsoft" ]]; then
  rm -rf "C:\\Microsoft\\Android*"
fi

echo "Free space on filesystem before build:"
df -h

pushd "$BUILDER_ROOT"
if [[ "$PACKAGE_TYPE" == 'conda' ]]; then
  ./windows/internal/build_conda.bat
elif [[ "$PACKAGE_TYPE" == 'wheel' || "$PACKAGE_TYPE" == 'libtorch' ]]; then
  ./windows/internal/build_wheels.bat
fi

echo "Free space on filesystem after build:"
df -h
Some files were not shown because too many files have changed in this diff