Mirror of https://github.com/pytorch/pytorch.git

Compare commits: v1.12.1-rc ... v1.10.2 (33 commits)
Commits (SHA1):
71f889c7d2, 932ac7bd71, 3e412cd6df, 302ee7bfb6, 0c91a7063d, eadb03895a, 8416d630c9, c78ceadbb0,
70af72c794, 36449ea931, b544cbddfa, ddf3092581, cc360fa38f, 3c134b8b1e, 4a514dd81e, c3ea586e32,
9509e8a3d6, 1774a6a2f4, a27906c250, 49f52b6c07, 5f1a434599, ecbf5a7439, 4e3ebebcff, 2b46c95e7c,
5f3eee1ca5, 4731f33d02, ecfcb8ff5a, 6aadfda9e2, 13666d20fd, 1fa17a20fc, c05547fa6c, 0e857bf109,
ad22804b95
.azure_pipelines/build-pipeline.yml (new file, 63 lines)
@@ -0,0 +1,63 @@
# PyTorch CI Builds Pipeline on Azure DevOps
#
# This pipeline:
# 1) builds PyTorch on select configurations
# 2) runs only TestTorch unit tests.

stages:
- stage: 'Build'
  displayName: 'Build PyTorch'
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_ci_build: True
      os: ubuntu
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_ci_build: True
      os: ubuntu
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      build_stage: True
      is_ci_build: True
      os: windows
      cuda: cpu
      customMatrixes:
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      build_stage: True
      is_ci_build: True
      os: windows
      cuda: gpu
      customMatrixes:
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
.azure_pipelines/daily-pipeline.yml (new file, 82 lines)
@@ -0,0 +1,82 @@
# PyTorch Daily Builds Pipeline on Azure DevOps
#
# This pipeline:
# 1) builds PyTorch on all available configurations
# 2) runs all PyTorch unit tests

stages:
- stage: 'BuildTest'
  displayName: 'Build and Test PyTorch'
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_daily_build: True
      os: ubuntu
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_daily_build: True
      os: ubuntu
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      build_stage: True
      is_daily_build: True
      os: windows
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      build_stage: True
      is_daily_build: True
      os: windows
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101
.azure_pipelines/job_templates/build-verify-publish-template-unix.yml (new file, 134 lines; file name inferred from the template references above)
@@ -0,0 +1,134 @@
# PyTorch build steps template with Unix images Azure DevOps Instances
#
# This build depends on 3 parameters set as environment variables in the pipeline:
# - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
# - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
# - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts

parameters:
  name: ''
  pool: ''
  container_endpoint: ''
  os: ''
  cuda: ''
  is_ci_build: False
  is_official_build: False
  is_daily_build: False
  build_stage: False
  verify_stage: False
  publish_stage: False
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 300
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  variables:
    DECODE_PERCENTS: false
  container:
    image: $[variables['container_image']]
    endpoint: ${{parameters.container_endpoint}}

  steps:
  # Build stage
  - ${{ if eq(parameters.build_stage, 'True') }}:
    # Set up environment variables for specific pipeline build
    - template: set-environment-variables.yml
      parameters:
        os: ${{ parameters.os}}
        cuda: ${{ parameters.cuda}}
        is_official_build: ${{ parameters.is_official_build}}

    # Sync and update PyTorch submodules
    - bash: git submodule update --init --recursive --jobs 0
      displayName: Update PyTorch submodules

    # Build PyTorch and run unit tests - no packaging
    - ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
      # Build PyTorch from source in develop mode
      - bash: python setup.py develop
        displayName: Build PyTorch from source

    - ${{ if eq(parameters.is_ci_build, 'True') }}:
      # Run TestTorch unit tests to demonstrate successful PyTorch build
      - bash: python test/test_torch.py TestTorch
        displayName: Run TestTorch unit tests

    - ${{ if eq(parameters.is_daily_build, 'True') }}:
      # Run all unit tests to demonstrate successful PyTorch build
      - bash: python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
        displayName: Run all unit tests

    # Run ComponentGovernance
    - task: ComponentGovernanceComponentDetection@0
      inputs:
        scanType: 'Register'
        verbosity: 'Verbose'
        alertWarningLevel: 'High'

    # Build PyTorch and produce artifacts for verification stage
    - ${{ if eq(parameters.is_official_build, 'True') }}:
      # Build PyTorch from source in install mode and exclude test binaries
      - bash: python setup.py install
        displayName: Build PyTorch from source without test binaries

      # Package PyTorch Wheel
      - bash: python setup.py bdist_wheel
        displayName: Package PyTorch Wheel

      # Publish PyTorch Wheel
      - task: PublishPipelineArtifact@1
        inputs:
          targetPath: $(Build.SourcesDirectory)/dist/
          artifactName: Build_$(Build.BuildNumber)_$(configuration)
        displayName: Publish PyTorch Wheel to Pipeline Artifacts

  # Verification stage
  - ${{ if eq(parameters.verify_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)/verify
      displayName: Download PyTorch Wheel

    # Install PyTorch Wheel
    - bash: python -m pip install $(Build.SourcesDirectory)/verify/torch*linux*.whl
      displayName: Install PyTorch Wheel

    # Ensure PyTorch installed correctly from produced wheel
    - bash: |
        cd $(Build.SourcesDirectory)/verify
        python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
      displayName: Check PyTorch correctly installed from wheel

  # Publishing stage
  - ${{ if eq(parameters.publish_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)/publish
      displayName: Download PyTorch Wheel

    # Publish wheel to Azure Artifacts
    # The flag continueOnError=true is needed as the artifact to be published
    # may already exist, because the artifact is differentiated based on the
    # last commit date.
    - bash: |
        export TORCH_VERSION=$(head -c 5 ./version.txt)
        export LAST_COMMIT=$(git rev-parse --short HEAD)
        export LAST_COMMIT_DATE=$(git log -1 --pretty=%ad --date=format:%Y%m%d)
        cd $(Build.SourcesDirectory)/publish
        export TORCH_WHEEL=$(echo torch*linux*whl)
        az extension add -n azure-devops
        echo $ADOTOKEN | az devops login
        az artifacts universal publish --organization $AZURE_DEVOPS_ARTIFACTS_ORGANIZATION --project $AZURE_DEVOPS_ARTIFACTS_PROJECT --scope project --feed "PyTorch" --name $TORCH_WHEEL --description "PyTorch Official Build Artifact" --version $TORCH_VERSION-$LAST_COMMIT_DATE-$LAST_COMMIT --path .
      env:
        ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
      continueOnError: true
      displayName: Upload PyTorch Official Build package to Azure Artifacts
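For context, a wheel published this way can later be pulled back out of the feed with the matching `az artifacts universal download` command. The step below is a minimal consumer-side sketch, not part of the diff above; the `<wheel-name>` and `<version>` placeholders stand for the values the publish step computes ($TORCH_WHEEL and $TORCH_VERSION-$LAST_COMMIT_DATE-$LAST_COMMIT).

- bash: |
    az extension add -n azure-devops
    echo $ADOTOKEN | az devops login
    # Placeholders: substitute the artifact name and version produced by the publish step
    az artifacts universal download --organization $AZURE_DEVOPS_ARTIFACTS_ORGANIZATION --project $AZURE_DEVOPS_ARTIFACTS_PROJECT --scope project --feed "PyTorch" --name <wheel-name> --version <version> --path .
  env:
    ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
  displayName: Download a previously published PyTorch wheel (illustration only)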
.azure_pipelines/job_templates/build-verify-publish-template-win.yml (new file, 150 lines; file name inferred from the template references above)
@@ -0,0 +1,150 @@
# PyTorch build steps template with Windows images Azure DevOps Instances
#
# This build depends on 3 parameters set as environment variables in the pipeline:
# - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
# - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
# - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts

parameters:
  name: ''
  pool: ''
  os: ''
  cuda: ''
  is_ci_build: False
  is_official_build: False
  is_daily_build: False
  build_stage: False
  verify_stage: False
  publish_stage: False
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 300
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  variables:
    CMAKE_GENERATOR: Ninja
    PACKAGE_PDBS: 0

  steps:
  # Prepare for PyTorch build on Windows
  - template: prepare-build-template.yml
    parameters:
      configuration: $(configuration)
      build_stage: ${{ parameters.build_stage}}

  # Build Stage
  - ${{ if eq(parameters.build_stage, 'True') }}:
    # Set up environment variables for specific pipeline build
    - template: set-environment-variables.yml
      parameters:
        os: ${{ parameters.os}}
        cuda: ${{ parameters.cuda}}
        is_official_build: ${{ parameters.is_official_build}}

    # Sync and update PyTorch submodules
    - script: git submodule update --init --recursive --jobs 0
      displayName: Update PyTorch submodules

    # Build PyTorch and run unit tests - no packaging
    - ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
      # Build PyTorch from source in develop mode with Ninja
      - script: call activate $(configuration) && python setup.py develop
        displayName: Build PyTorch from source

    - ${{ if eq(parameters.is_ci_build, 'True') }}:
      # Run TestTorch unit tests to demonstrate successful PyTorch build
      - script: call activate $(configuration) && python test\test_torch.py TestTorch
        displayName: Run TestTorch unit tests

    - ${{ if eq(parameters.is_daily_build, 'True') }}:
      # Run all unit tests to demonstrate successful PyTorch build
      - script: call activate $(configuration) && python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
        displayName: Run all unit tests

    # Run ComponentGovernance
    - task: ComponentGovernanceComponentDetection@0
      inputs:
        scanType: 'Register'
        verbosity: 'Verbose'
        alertWarningLevel: 'High'

    # Build PyTorch and produce artifacts for verification stage
    - ${{ if eq(parameters.is_official_build, 'True') }}:
      # Build PyTorch from source in install mode with Ninja and exclude test binaries
      - script: call activate $(configuration) && python setup.py install
        displayName: Build PyTorch from source without test binaries

      # Package PyTorch Wheel
      - script: call activate $(configuration) && python setup.py bdist_wheel
        displayName: Package PyTorch Wheel

      # Publish PyTorch Wheel
      - task: PublishPipelineArtifact@1
        inputs:
          targetPath: $(Build.SourcesDirectory)\dist\
          artifactName: Build_$(Build.BuildNumber)_$(configuration)
        displayName: Publish PyTorch Wheel to Pipeline Artifacts

  # Verification Stage
  - ${{ if eq(parameters.verify_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)\verify
      displayName: Download PyTorch Wheel

    # Install PyTorch Wheel on Windows
    - script: |
        call activate $(configuration)
        cd $(Build.SourcesDirectory)\verify
        dir torch*win*.whl /b > whl.txt
        set /p whl= < whl.txt
        python -m pip install %whl%
      displayName: Install PyTorch Wheel

    # Ensure PyTorch installed correctly from produced wheel
    - script: |
        call activate $(configuration)
        cd $(Build.SourcesDirectory)\verify
        python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
      displayName: Check PyTorch correctly installed from wheel

  # Publishing stage
  - ${{ if eq(parameters.publish_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)\publish
      displayName: Download PyTorch Wheel

    # Set up Azure Artifacts for Windows
    # The pip install --upgrade command is a bug fix for Azure CLI on Windows
    # More info: https://github.com/Azure/azure-cli/issues/16858
    - script: |
        pip install --upgrade pip --target \opt\az\lib\python3.6\site-packages\
        az extension add -n azure-devops
      displayName: Set up Azure Artifacts download on Windows

    # Publish wheel to Azure Artifacts
    # The flag continueOnError=true is needed as the artifact to be published
    # may already exist, because the artifact is differentiated based on the
    # last commit date.
    - script: |
        set /p TORCH_VERSION= < version.txt
        cd $(Build.SourcesDirectory)\publish
        git rev-parse --short HEAD > last_commit.txt && set /p LAST_COMMIT= < last_commit.txt
        git log -1 --pretty=%ad --date=format:%Y%m%d > last_commit_date.txt && set /p LAST_COMMIT_DATE= < last_commit_date.txt
        dir torch*win*.whl /b > whl.txt && set /p TORCH_WHEEL= < whl.txt
        echo %ADOTOKEN% | az devops login
        az artifacts universal publish --organization %AZURE_DEVOPS_ARTIFACTS_ORGANIZATION% --project %AZURE_DEVOPS_ARTIFACTS_PROJECT% --scope project --feed "PyTorch" --name %TORCH_WHEEL% --description "PyTorch Official Build Artifact" --version %TORCH_VERSION:~0,5%-%LAST_COMMIT_DATE%-%LAST_COMMIT% --path .
      env:
        ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
      continueOnError: true
      displayName: Upload PyTorch nightly package to Azure Artifacts
.azure_pipelines/job_templates/common-packages.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
dependencies:
- python=PYTHON_VERSION
- numpy
- ninja
- pyyaml
- mkl
- mkl-include
- setuptools
- cmake
- cffi
- typing_extensions
- future
- six
- requests
- dataclasses
- pip:
  - -r ../../requirements.txt
.azure_pipelines/job_templates/notify-webapp-template.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
parameters:
  name: ''
  pool: ''
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 600
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  steps:
  # Clone PyTorch Tests repository
  - bash: |
      B64_PAT=$(echo -n ":$_ADOTOKEN" | base64)
      git -c http.extraHeader="Authorization: Basic ${B64_PAT}" clone $(AZURE_DEVOPS_PYTORCH_TESTS_REPO_URL)
      cd pytorch_tests
      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
    displayName: Clone PyTorch Tests repo
  - bash: |
      bash $(Build.SourcesDirectory)/pytorch_tests/webapp/notify_webapp.sh
    displayName: Notify Webapp
.azure_pipelines/job_templates/prepare-build-template.yml (new file, 62 lines)
@@ -0,0 +1,62 @@
# Build prepare steps for PyTorch on Azure DevOps to build from source.
# These steps are shared between the normal build process and Semmle security scan tasks.

parameters:
  build_stage: False
  configuration: ''

steps:
# End Python tasks that may be lingering from previous runs
# Note: If python.exe isn't currently running, the exit code becomes 128,
# which fails the run. Here the exit code is set to 0 to avoid a failed run.
- script: |
    taskkill /f /im python.exe
    IF %ERRORLEVEL% EQU 128 exit 0
  displayName: End previous Python processes

# Clean up env directory in conda for fresh builds and set up conda environment YAML
- powershell: |
    Remove-Item 'C:\Miniconda\envs' -Recurse -ErrorAction Ignore
    $env:PYTHON_VERSION = $env:SYSTEM_JOBNAME.Substring(3,1) + '.' + $env:SYSTEM_JOBNAME.Substring(4,1)
    (Get-Content .azure_pipelines\job_templates\common-packages.yml) -replace 'PYTHON_VERSION', $env:PYTHON_VERSION | Out-File -encoding ASCII .azure_pipelines\job_templates\common-packages.yml
  displayName: Clean up previous environments and set up conda environment YAML

# Make conda environment and install required packages
- script: |
    call conda clean --all -y
    call conda env create -n $(configuration) --file .azure_pipelines\job_templates\common-packages.yml
    call activate $(configuration)
    call conda install -c conda-forge libuv=1.39
  displayName: Set up conda environment for building from source

- ${{ if eq(parameters.build_stage, 'True') }}:
  # Install MKL
  - script: |
      rmdir /s /q mkl
      del mkl_2020.2.254.7z
      curl https://s3.amazonaws.com/ossci-windows/mkl_2020.2.254.7z -k -O
      7z x -aoa mkl_2020.2.254.7z -omkl
    displayName: Install MKL

  # Install sccache and randomtemp
  # Related PyTorch GitHub issue: https://github.com/pytorch/pytorch/issues/25393
  # Related fix: https://github.com/pytorch/builder/pull/448/
  - script: |
      mkdir .\tmp_bin
      curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output .\tmp_bin\sccache.exe
      curl -k https://s3.amazonaws.com/ossci-windows/sccache-cl.exe --output .\tmp_bin\sccache-cl.exe
      copy .\tmp_bin\sccache.exe .\tmp_bin\nvcc.exe
      curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.3/randomtemp.exe --output .\tmp_bin\randomtemp.exe
    displayName: Install sccache and randomtemp
    condition: not(eq(variables.CUDA_VERSION, ''))

  # CUDA 11.2's CUB directory conflicts with CUDA 10.2 and 10.1
  # builds, where CUDA 11.2's CUB is injected into non-CUDA
  # 11.2 builds.
  - powershell: Remove-Item "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include\cub" -Recurse -ErrorAction Ignore
    displayName: Remove conflicting CUB from CUDA installation
    condition: not(eq(variables.CUDA_VERSION, ''))

  - powershell: Copy-Item -Path "F:\cuda_11_2\cub\" -Destination "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include" -Recurse
    displayName: Copy CUDA CUB for CUDA 11.2 build
    condition: eq(variables.CUDA_VERSION, '112')
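The PYTHON_VERSION substitution above relies on the matrix leg names used throughout these pipelines starting with Py_<major><minor> (for example Py_38), which Azure DevOps exposes to the job as SYSTEM_JOBNAME. A hedged, standalone illustration of the same string arithmetic; the hard-coded job name here is a hypothetical stand-in for $env:SYSTEM_JOBNAME:

- powershell: |
    # Assume the matrix leg is named with the Py_<major><minor> convention, e.g. Py_38
    $jobName = 'Py_38'   # hypothetical value of $env:SYSTEM_JOBNAME
    $pythonVersion = $jobName.Substring(3,1) + '.' + $jobName.Substring(4,1)
    Write-Host "Derived Python version: $pythonVersion"   # prints 3.8
  displayName: Illustrate PYTHON_VERSION derivation (sketch only)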
.azure_pipelines/job_templates/pytorch-template-unix.yml (new file, 61 lines)
@@ -0,0 +1,61 @@
# PyTorch build steps template with Unix images Azure DevOps Instances
#
# This build depends on 5 parameters set as environment variables in the pipeline:
# - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
# - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
# - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests

parameters:
  name: ''
  pool: ''
  container_endpoint: ''
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 600
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  variables:
    DECODE_PERCENTS: false

  steps:
  # Don't checkout repo contents to save time and CPU compute. Environment variables
  # related to the checkout branch such as $(BUILD_SOURCEBRANCH) are still available.
  - checkout: none

  # Delete pytorch_tests repo from previous builds if it exists
  - bash: rm -rf pytorch_tests/
    displayName: Delete pytorch_tests repo from previous builds if exists

  # Clone PyTorch Tests repository
  - bash: |
      B64_PAT=$(echo -n ":$_ADOTOKEN" | base64)
      git -c http.extraHeader="Authorization: Basic ${B64_PAT}" clone $(AZURE_DEVOPS_PYTORCH_TESTS_REPO_URL)
      cd pytorch_tests
      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
    displayName: Clone PyTorch Tests repo

  # Run PyTorch Unit Tests
  - bash: bash $(Build.SourcesDirectory)/pytorch_tests/scripts/linux/run.sh
    env:
      _AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
      _TS_CLONE_P: $(TS_CLONE_PASSWORD)
      _TS_P: $(TS_PAT)
      _TS_SM_P: $(TS_SM_PAT)
      _AZUREML_CLONE_PASSWORD: $(AZUREML_CLONE_PASSWORD)
      _SPPASSWORD: $(SPPASSWORD)
    displayName: Run PyTorch Unit Tests

  # Test results are available outside the docker container since
  # the current directory is mounted as a volume of the container.
  - task: PublishTestResults@2
    condition: always()
    inputs:
      testResultsFiles: '**/test-*.xml'
      testRunTitle: 'Publish test results for Python'
.azure_pipelines/job_templates/pytorch-template-win.yml (new file, 57 lines)
@@ -0,0 +1,57 @@
# PyTorch build steps template with Windows images Azure DevOps Instances
#
# This build depends on 5 parameters set as environment variables in the pipeline:
# - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
# - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
# - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests

parameters:
  name: ''
  pool: ''
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 600
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}

  steps:
  # Don't checkout repo contents to save time and CPU compute. Environment variables
  # related to the checkout branch such as $(BUILD_SOURCEBRANCH) are still available.
  - checkout: none

  # Delete pytorch_tests repo from previous builds if it exists
  - script: if exist "pytorch_tests/" rmdir "pytorch_tests/" /q /s
    displayName: Delete pytorch_tests repo from previous builds if exists

  # Clone PyTorch Tests repository
  - powershell: |
      $env:B64Pat = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes(":$env:_ADOTOKEN"))
      git -c http.extraHeader="Authorization: Basic $env:B64Pat" clone $env:AZURE_DEVOPS_pytorch_tests_REPO_URL
      cd pytorch_tests
      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
    displayName: Clone PyTorch Tests repo

  # Run PyTorch Unit Tests
  - script: call $(Build.SourcesDirectory)\pytorch_tests\scripts\windows\run.bat
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
      _AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
      _TS_CLONE_P: $(TS_CLONE_PASSWORD)
      _TS_P: $(TS_PAT)
      _TS_SM_P: $(TS_SM_PAT)
    displayName: Run PyTorch Unit Tests

  # Test results are available outside the docker container since
  # the current directory is mounted as a volume of the container.
  - task: PublishTestResults@2
    condition: always()
    inputs:
      testResultsFiles: '**\test-*.xml'
      testRunTitle: 'Publish test results for Python'
.azure_pipelines/job_templates/set-environment-variables.yml (new file, 131 lines)
@@ -0,0 +1,131 @@
# Set environment variables for specific configurations

parameters:
  is_official_build: False
  os: ''
  cuda: ''

steps:
# Environment configuration steps for Ubuntu builds
- ${{ if contains(parameters.os, 'ubuntu') }}:
  # Set configuration specific build flags
  - ${{ if eq(parameters.is_official_build, True) }}:
    - bash: |
        echo "##vso[task.setvariable variable=INSTALL_TEST;]0"
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
        export PYTORCH_VERSION=$(head -c 5 ./version.txt)
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
      displayName: Set configuration-specific build flags

  # Set PyTorch CPU/GPU build flags.
  - ${{ if contains(parameters.cuda, 'cpu') }}:
    - bash: |
        echo "##vso[task.setvariable variable=USE_CUDA;]0"
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
      displayName: Set CUDA-specific build flag for CPU builds

  - ${{ if contains(parameters.cuda, 'gpu') }}:
    - bash: |
        echo "##vso[task.setvariable variable=USE_CUDA;]1"
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
      displayName: Set CUDA-specific build flag for GPU builds

  # Set MKL environment variables
  - bash: |
      echo "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]/opt/intel/lib:$CMAKE_LIBRARY_PATH"
      echo "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]/opt/intel/include:$CMAKE_INCLUDE_PATH"
    displayName: Set MKL paths

  # View current environment variables
  - bash:
      printenv
    displayName: Show environment variables

# Environment configuration steps for Windows builds
- ${{ if contains(parameters.os, 'windows') }}:
  # Set Conda Lib Path
  - powershell: Write-Host "##vso[task.setvariable variable=CONDA_LIB_PATH;]C:\Miniconda\envs\$(configuration)\Library\bin"
    displayName: Set Conda Lib Path

  # Set configuration specific build flags
  - ${{ if eq(parameters.is_official_build, True) }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=INSTALL_TEST;]0"
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
        Set-Variable -Name PYTORCH_VERSION -Value (Get-Content .\version.txt).Substring(0,5)
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
      displayName: Set configuration-specific build flags

  # Set PyTorch CPU/GPU build flags.
  - ${{ if contains(parameters.cuda, 'cpu') }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=USE_CUDA;]0"
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
      displayName: Set CUDA-specific build flag for CPU build

  - ${{ if contains(parameters.cuda, 'gpu') }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=USE_CUDA;]1"
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
      displayName: Set CUDA-specific build flag for GPU build

  # Set CUDA 11.2, 10.2 or 10.1 specific build flags
  - ${{ if eq(parameters.cuda, 'gpu') }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5;8.0;8.6"
        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\"
      displayName: Set CUDA 11.2 specific build flags
      condition: eq(variables.CUDA_VERSION, '112')

    - powershell: |
        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\"
      displayName: Set CUDA 10.2 specific build flags
      condition: eq(variables.CUDA_VERSION, '102')

    - powershell: |
        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\"
      displayName: Set CUDA 10.1 specific build flags
      condition: eq(variables.CUDA_VERSION, '101')

    - powershell: |
        Write-Host "##vso[task.setvariable variable=CUDA_BIN_PATH;]$env:CUDA_PATH\bin\"
        Write-Host "##vso[task.setvariable variable=CUDNN_ROOT;]$env:CUDA_PATH"
        Write-Host "##vso[task.setvariable variable=CUDNN_INCLUDE_DIR;]$env:CUDA_PATH\include\"
        Write-Host "##vso[task.setvariable variable=CUDNN_LIBRARY;]$env:CUDA_PATH\lib\x64\"
        Write-Host "##vso[task.prependpath]$env:CUDA_PATH\bin"
        Write-Host "##vso[task.setvariable variable=TORCH_NVCC_FLAGS;]-Xfatbin -compress-all --no-host-device-move-forward"
        Write-Host "##vso[task.setvariable variable=THRUST_IGNORE_CUB_VERSION_CHECK;]1"
        Write-Host "##vso[task.setvariable variable=NVTOOLSEXT_PATH;]C:\Program Files\NVIDIA Corporation\NvToolsExt\"
      displayName: Set CUDA environment variables

    - powershell: |
        copy "$(CUDA_BIN_PATH)\cusparse*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cublas*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cudart*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\curand*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cufft*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cusolver*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cudnn*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\nvrtc*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64\nvToolsExt64_1.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CONDA_LIB_PATH)\libiomp*5md.dll" $(Build.SourcesDirectory)\torch\lib
        copy "$(CONDA_LIB_PATH)\uv.dll" $(Build.SourcesDirectory)\torch\lib
      displayName: Copy CUDA/cuDNN/libomp/libuv dlls to torch\lib

  # Set MKL, sccache and randomtemp environment variables
  - powershell: |
      Write-Host "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]$(Build.SourcesDirectory)\mkl\include"
      Write-Host "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]$(Build.SourcesDirectory)\mkl\lib;$env:CMAKE_LIBRARY_PATH"
      Write-Host "##vso[task.setvariable variable=ADDITIONAL_PATH;]$(Build.SourcesDirectory)\tmp_bin"
      Write-Host "##vso[task.setvariable variable=SCCACHE_IDLE_TIMEOUT;]1500"
      Write-Host "##vso[task.setvariable variable=RANDOMTEMP_EXECUTABLE;]$(Build.SourcesDirectory)\tmp_bin\nvcc.exe"
      Write-Host "##vso[task.setvariable variable=CUDA_NVCC_EXECUTABLE;]$(Build.SourcesDirectory)\tmp_bin\randomtemp.exe"
      Write-Host "##vso[task.setvariable variable=RANDOMTEMP_BASEDIR;]$(Build.SourcesDirectory)\tmp_bin"
    displayName: Set MKL, sccache and randomtemp environment variables

  # View current environment variables
  - script:
      set
    displayName: Show environment variables
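All of the steps above rely on Azure DevOps logging commands: writing ##vso[task.setvariable ...] to stdout makes the value available to later steps in the same job as $(NAME). A minimal illustration of that mechanism, not part of the template above; the variable name is made up:

steps:
- bash: echo "##vso[task.setvariable variable=EXAMPLE_FLAG;]1"
  displayName: Set EXAMPLE_FLAG for later steps
- bash: echo "EXAMPLE_FLAG is $(EXAMPLE_FLAG)"
  displayName: Read EXAMPLE_FLAG in a subsequent step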
.azure_pipelines/job_templates/wheel-wait-job-template.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
# Main logic to initiate wait for PR artifact to be ready

steps:
- task: InvokeRESTAPI@1
  displayName: 'Wait for job success and wheel ready'
  timeoutInMinutes: 60
  inputs:
    connectionType: 'connectedServiceName'
    serviceConnection: circleciconn
    method: 'POST'
    headers: '{"Content-Type":"application/json", "BranchName":"$(_TARGET_BRANCH_TO_CHECK)", "JobName":"$(TARGET_CIRCLECI_BUILD_PR)", "PRNumber":"$(_TARGET_PR_NUMBER)", "TargetCommit":"$(_TARGET_COMMIT)", "PlanUrl":"$(System.CollectionUri)", "ProjectId":"$(System.TeamProjectId)", "HubName":"$(System.HostType)", "PlanId":"$(System.PlanId)", "JobId":"$(System.JobId)", "TimelineId":"$(System.TimelineId)", "TaskInstanceId":"$(System.TaskInstanceId)", "AuthToken":"$(System.AccessToken)"}'
    body: ''
    urlSuffix: 'api/JobStatus'
    waitForCompletion: true
.azure_pipelines/job_templates/wheel-wait-template.yml (new file, 92 lines)
@@ -0,0 +1,92 @@
# Initiate 11 agentless-server waiting jobs to check on the
# status of PR artifact builds, for a maximum wait time of
# 11*60 min=660 mins. These jobs will pass immediately
# once the targeted CircleCI build is ready.

jobs:
- job: checkjob1
  pool: server
  timeoutInMinutes: 60
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob2
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob1
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob3
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob2
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob4
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob3
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob5
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob4
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob6
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob5
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob7
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob6
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob8
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob7
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob9
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob8
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob10
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob9
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob11
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob10
  continueOnError: true
  steps:
  - template: wheel-wait-job-template.yml
.azure_pipelines/nightly-pytorch-tests-pipeline.yml (new file, 60 lines)
@@ -0,0 +1,60 @@
# PyTorch Nightly PyTorch Tests Builds Pipeline on Azure DevOps
#
# This pipeline runs custom PyTorch unit-tests on nightly
# PyTorch wheels.

stages:
- stage: 'NightlyCustomTests'
  displayName: 'Run custom unit tests on PyTorch wheels'
  jobs:
  - template: job_templates/pytorch-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: $(BUILD_POOL_LIN_1)
      customMatrixes:
        Nightly_Custom_Tests:
          _DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_1)
          _PYTHON_VERSION: $(PYTHON_VERSION_LIN_1)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_1)
          _RUN_TESTS: $(RUN_TESTS_LIN)

  - template: job_templates/pytorch-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: $(BUILD_POOL_LIN_2)
      customMatrixes:
        Nightly_Custom_Tests:
          _DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_2)
          _PYTHON_VERSION: $(PYTHON_VERSION_LIN_2)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_2)
          _RUN_TESTS: $(RUN_TESTS_LIN)

  - template: job_templates/pytorch-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: $(BUILD_POOL_WIN_1)
      customMatrixes:
        Nightly_Custom_Tests:
          _PYTHON_VERSION: $(PYTHON_VERSION_WIN_1)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_1)
          _RUN_TESTS: $(RUN_TESTS_WIN)

  - template: job_templates/pytorch-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: $(BUILD_POOL_WIN_2)
      customMatrixes:
        Nightly_Custom_Tests:
          _PYTHON_VERSION: $(PYTHON_VERSION_WIN_2)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_2)
          _RUN_TESTS: $(RUN_TESTS_WIN)

- stage: 'NotifyWebapp'
  displayName: 'Notify Webapp that pipeline is finished'
  dependsOn: NightlyCustomTests
  condition: succeededOrFailed()
  jobs:
  - template: job_templates/notify-webapp-template.yml
    parameters:
      name: ubuntu_1804_CPU
      pool: $(BUILD_POOL_LIN_1)
.azure_pipelines/pytorch-tests-pipeline.yml (new file, 62 lines)
@@ -0,0 +1,62 @@
# PyTorch PR PyTorch Tests Builds Pipeline on Azure DevOps
#
# This pipeline:
# 1) ensures that CircleCI builds for a given PR
#    have finished, and that their artifacts are
#    ready for download
# 2) runs custom PyTorch unit-tests on PyTorch
#    wheels generated during PR builds.

resources:
  webhooks:
  - webhook: GitHubPyTorchPRTrigger
    connection: GitHubPyTorchPRTriggerConnection
    filters:
    - path: repositoryName
      value: pytorch_tests

stages:
- stage: 'EnsureArtifactsReady'
  displayName: 'Ensure PyTorch PR Artifacts are ready'
  jobs:
  - template: job_templates/wheel-wait-template.yml
  variables:
    _TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
    _TARGET_PR_NUMBER: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
    _TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}

- stage: 'PRCustomTests'
  displayName: 'Run custom unit tests on PyTorch wheels'
  dependsOn: EnsureArtifactsReady
  condition: succeeded()
  jobs:
  - template: job_templates/pytorch-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: $(BUILD_POOL_PR)
      customMatrixes:
        PR_Custom_Tests:
          _PYTHON_VERSION: $(PYTHON_VERSION_PR)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_PR)
          _TARGET_CIRCLECI_BUILD: $(TARGET_CIRCLECI_BUILD_PR)
          _TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
          _TARGET_PR_NUMBER: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
          _TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}
          _DOCKER_IMAGE: $(DOCKER_IMAGE_PR)
          _RUN_TESTS: $(RUN_TESTS_PR)

- stage: 'NotifyWebapp'
  displayName: 'Notify Webapp that pipeline is finished'
  dependsOn: PRCustomTests
  condition: succeededOrFailed()
  jobs:
  - template: job_templates/notify-webapp-template.yml
    parameters:
      name: ubuntu_1804_CPU
      pool: $(BUILD_POOL_LIN_1)
      customMatrixes:
        PR_Notify_WebApp:
          _TARGET_CIRCLECI_BUILD: $(TARGET_CIRCLECI_BUILD_PR)
          _TARGET_BRANCH_TO_CHECK: ${{parameters.GitHubPyTorchPRTrigger.TARGET_BRANCH_TO_CHECK_AZ_DEVOPS_PR}}
          _TARGET_PR_NUMBER: ${{parameters.GitHubPyTorchPRTrigger.PR_NUMBER}}
          _TARGET_COMMIT: ${{parameters.GitHubPyTorchPRTrigger.TARGET_COMMIT}}
.azure_pipelines/verify-pipeline.yml (new file, 224 lines)
@@ -0,0 +1,224 @@
# PyTorch Official Builds Pipeline on Azure DevOps
#
# This pipeline:
# 1) builds PyTorch on all available configurations
# 2) verifies PyTorch artifacts by installing them in a clean environment
#    and checking torch.__version__
# 3) publishes official PyTorch artifacts to Azure DevOps Artifacts for consumption

stages:
- stage: 'Build'
  displayName: 'Build PyTorch'
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_official_build: True
      os: ubuntu
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_official_build: True
      os: ubuntu
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      build_stage: True
      is_official_build: True
      os: windows
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      build_stage: True
      is_official_build: True
      os: windows
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101

- stage: 'Verify'
  displayName: 'Verify PyTorch wheels'
  dependsOn: Build
  condition: succeeded()
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101

- stage: 'Publish'
  displayName: 'Publish PyTorch wheels'
  dependsOn: Verify
  condition: succeeded()
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101
.bazelrc (16 lines changed)
@@ -1,12 +1,6 @@
build --cxxopt=--std=c++14
build --copt=--std=c++14
build --copt=-I.
# Bazel does not support including its cc_library targets as system
# headers. We work around this for generated code
# (e.g. c10/macros/cmake_macros.h) by making the generated directory a
# system include path.
build --copt=-isystem --copt bazel-out/k8-fastbuild/bin
build --copt=-isystem --copt bazel-out/darwin-fastbuild/bin
build --experimental_ui_max_stdouterr_bytes=2048576

# Configuration to disable tty features for environments like CI
build:no-tty --curses no
@@ -17,11 +11,3 @@ build:no-tty --show_progress_rate_limit 10
build:gpu --define=cuda=true
# define a separate build folder for faster switching between configs
build:gpu --platform_suffix=-gpu
# See the note on the config-less build for details about why we are
# doing this. We must also do it for the "-gpu" platform suffix.
build --copt=-isystem --copt=bazel-out/k8-fastbuild-gpu/bin
# rules_cuda configuration
build:gpu --@rules_cuda//cuda:enable_cuda
build:gpu --@rules_cuda//cuda:cuda_targets=sm_52
build:gpu --@rules_cuda//cuda:compiler=nvcc
build:gpu --repo_env=CUDA_PATH=/usr/local/cuda
.buckconfig (deleted file, 15 lines; file name inferred from the Buck configuration contents)
@@ -1,15 +0,0 @@
[buildfile]
name = BUILD.buck

[repositories]
bazel_skylib = third_party/bazel-skylib/

[download]
in_build = true

[cxx]
cxxflags = -std=c++17
should_remap_host_platform = true

[project]
default_flavors_mode=all
.circleci/README.md (new file, 498 lines; listing truncated below)
@@ -0,0 +1,498 @@
Structure of CI
===============

setup job:
1. Does a git checkout
2. Persists CircleCI scripts (everything in `.circleci`) into a workspace. Why?
   We don't always do a Git checkout on all subjobs, but we usually
   still want to be able to call scripts one way or another in a subjob.
   Persisting files this way lets us have access to them without doing a
   checkout. This workspace is conventionally mounted on `~/workspace`
   (this is distinguished from `~/project`, which is the conventional
   working directory that CircleCI will default to starting your jobs
   in.)
3. Write out the commit message to `.circleci/COMMIT_MSG`. This is so
   we can determine in subjobs if we should actually run the jobs or
   not, even if there isn't a Git checkout (a sketch of such a check
   follows this list).
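A minimal sketch of how a subjob could consume the persisted workspace and the commit message. The `[skip ci]` marker and step name are hypothetical illustrations, not steps taken from the actual config.yml:

steps:
  - attach_workspace:
      at: ~/workspace
  - run:
      name: Halt early based on the commit message (hypothetical check)
      command: |
        if grep -q "\[skip ci\]" ~/workspace/.circleci/COMMIT_MSG; then
          circleci-agent step halt
        fi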
CircleCI configuration generator
|
||||
================================
|
||||
|
||||
One may no longer make changes to the `.circleci/config.yml` file directly.
|
||||
Instead, one must edit these Python scripts or files in the `verbatim-sources/` directory.
|
||||
|
||||
|
||||
Usage
|
||||
----------
|
||||
|
||||
1. Make changes to these scripts.
|
||||
2. Run the `regenerate.sh` script in this directory and commit the script changes and the resulting change to `config.yml`.
|
||||
|
||||
You'll see a build failure on GitHub if the scripts don't agree with the checked-in version.
|
||||

Motivation
----------

These scripts establish a single, authoritative source of documentation for the CircleCI configuration matrix.
The documentation, in the form of diagrams, is automatically generated and cannot drift out of sync with the YAML content.

Furthermore, consistency is enforced within the YAML config itself, by using a single source of data to generate
multiple parts of the file.

* Facilitates one-off culling/enabling of CI configs for testing PRs on special targets

Also see https://github.com/pytorch/pytorch/issues/17038


Future direction
----------------

### Declaring sparse config subsets
See comment [here](https://github.com/pytorch/pytorch/pull/17323#pullrequestreview-206945747):

In contrast with a full recursive tree traversal of configuration dimensions,
> in the future I think we actually want to decrease our matrix somewhat and have only a few mostly-orthogonal builds that taste as many different features as possible on PRs, plus a more complete suite on every PR and maybe an almost full suite nightly/weekly (we don't have this yet). Specifying PR jobs in the future might be easier to read with an explicit list when we come to this.

----------------
----------------

# How do the binaries / nightlies / releases work?

### What is a binary?

A binary or package (used interchangeably) is a pre-built collection of c++ libraries, header files, python bits, and other files. We build these and distribute them so that users do not need to install from source.

A **binary configuration** is a collection of

* release or nightly
    * releases are stable, nightlies are beta and built every night
* python version
    * linux: 3.5m, 3.6m, 3.7m (mu is wide unicode or something like that. It usually doesn't matter, but you should know that it exists)
    * macos: 3.6, 3.7, 3.8
    * windows: 3.6, 3.7, 3.8
* cpu version
    * cpu, cuda 9.0, cuda 10.0
    * The supported cuda versions occasionally change
* operating system
    * Linux - these are all built on CentOS. There haven't been any problems in the past building on CentOS and using on Ubuntu
    * MacOS
    * Windows - these are built on Azure pipelines
* devtoolset version (gcc compiler version)
    * This only matters on Linux because only Linux uses gcc. tl;dr is that gcc made a backwards-incompatible change from gcc 4.8 to gcc 5, because it had to change how it implemented std::vector and std::string

### Where are the binaries?

The binaries are built in CircleCI. There are nightly binaries built every night at 9pm PST (midnight EST) and release binaries corresponding to PyTorch releases, usually every few months.

We have 3 types of binary packages

* pip packages - nightlies are stored on s3 (pip install -f \<a s3 url\>). releases are stored in a pip repo (pip install torch) (ask Soumith about this)
* conda packages - nightlies and releases are both stored in a conda repo. Nightly packages have a '_nightly' suffix
* libtorch packages - these are zips of all the c++ libraries, header files, and sometimes dependencies. These are c++ only
    * shared with dependencies (the only supported option for Windows)
    * static with dependencies
    * shared without dependencies
    * static without dependencies

All binaries are built in CircleCI workflows except Windows. There are checked-in workflows (committed into the .circleci/config.yml) to build the nightlies every night. Releases are built by manually pushing a PR that builds the suite of release binaries (overwrite the config.yml to build the release).

# CircleCI structure of the binaries

Some quick vocab:

* A \**workflow** is a CircleCI concept; it is a DAG of '**jobs**'. ctrl-f 'workflows' on https://github.com/pytorch/pytorch/blob/master/.circleci/config.yml to see the workflows.
* **jobs** are a sequence of '**steps**'
* **steps** are usually just a bash script or a builtin CircleCI command. *All steps run in new environments; environment variables declared in one script DO NOT persist to following steps*
* CircleCI has a **workspace**, which is essentially a cache between steps of the *same job* in which you can store artifacts between steps.

## How are the workflows structured?

The nightly binaries have 3 workflows. We have one job (actually 3 jobs: build, test, and upload) per binary configuration

1. binary_builds
    1. every day midnight EST
    2. linux: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/linux-binary-build-defaults.yml
    3. macos: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/macos-binary-build-defaults.yml
    4. For each binary configuration, e.g. linux_conda_3.7_cpu there is a
        1. binary_linux_conda_3.7_cpu_build
            1. Builds the package. On linux jobs this uses the 'docker executor'.
            2. Persists the package to the workspace
        2. binary_linux_conda_3.7_cpu_test
            1. Loads the package from the workspace
            2. Spins up a docker image (on Linux), mapping the package and code repos into the docker
            3. Runs some smoke tests in the docker
            4. (Actually, for macos this is a step rather than a separate job)
        3. binary_linux_conda_3.7_cpu_upload
            1. Logs in to aws/conda
            2. Uploads the package
2. update_s3_htmls
    1. every day 5am EST
    2. https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/binary_update_htmls.yml
    3. See below for what these are for and why they're needed
    4. Three jobs that each examine the current contents of aws and the conda repo and update some html files in s3
3. binarysmoketests
    1. every day
    2. https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
    3. For each binary configuration, e.g. linux_conda_3.7_cpu there is a
        1. smoke_linux_conda_3.7_cpu
            1. Downloads the package from the cloud, e.g. using the official pip or conda instructions
            2. Runs the smoke tests

## How are the jobs structured?

The jobs are in https://github.com/pytorch/pytorch/tree/master/.circleci/verbatim-sources. Jobs are made of multiple steps. There are some shared steps used by all the binaries/smokes. Steps of these jobs are all delegated to scripts in https://github.com/pytorch/pytorch/tree/master/.circleci/scripts.

* Linux jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/linux-binary-build-defaults.yml
    * binary_linux_build.sh
    * binary_linux_test.sh
    * binary_linux_upload.sh
* MacOS jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/macos-binary-build-defaults.yml
    * binary_macos_build.sh
    * binary_macos_test.sh
    * binary_macos_upload.sh
* Update html jobs: https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/binary_update_htmls.yml
    * These delegate to scripts in the pytorch/builder repo
    * https://github.com/pytorch/builder/blob/master/cron/update_s3_htmls.sh
    * https://github.com/pytorch/builder/blob/master/cron/upload_binary_sizes.sh
* Smoke jobs (both linux and macos): https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/nightly-build-smoke-tests-defaults.yml
    * These delegate to scripts in the pytorch/builder repo
    * https://github.com/pytorch/builder/blob/master/run_tests.sh
    * https://github.com/pytorch/builder/blob/master/smoke_test.sh
    * https://github.com/pytorch/builder/blob/master/check_binary.sh
* Common shared code (shared across linux and macos): https://github.com/pytorch/pytorch/blob/master/.circleci/verbatim-sources/nightly-binary-build-defaults.yml
    * binary_checkout.sh - checks out the pytorch/builder repo. Right now this also checks out pytorch/pytorch, but it shouldn't; pytorch/pytorch should just be shared through the workspace. This can handle being run before binary_populate_env.sh
    * binary_populate_env.sh - parses BUILD_ENVIRONMENT into the separate env variables that make up a binary configuration (see the sketch just after this list for the kind of variables it produces). Also sets lots of default values, the date, the version strings, the location of folders in s3, all sorts of things. This generally has to be run before other steps.
    * binary_install_miniconda.sh - Installs miniconda, cross platform. Also hacks this for the update_binary_sizes job that doesn't have the right env variables
    * binary_run_in_docker.sh - Takes a bash script file (the actual test code) from a hardcoded location, spins up a docker image, and runs the script inside the docker image
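
For intuition only, here is a hypothetical sketch of what binary_populate_env.sh boils down to. The exact BUILD_ENVIRONMENT format and the full variable list are assumptions here; only PACKAGE_TYPE, DESIRED_PYTHON and DESIRED_CUDA are names used elsewhere in this document.

```sh
# Hypothetical sketch: split a BUILD_ENVIRONMENT value such as
# "conda 3.7 cpu" into the per-configuration variables used downstream.
# The real script also sets dates, version strings, s3 folder paths, etc.
read -r PACKAGE_TYPE DESIRED_PYTHON DESIRED_CUDA <<< "$BUILD_ENVIRONMENT"
export PACKAGE_TYPE DESIRED_PYTHON DESIRED_CUDA
echo "package=$PACKAGE_TYPE python=$DESIRED_PYTHON cuda=$DESIRED_CUDA"
```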

### **Why do the steps all refer to scripts?**

CircleCI creates a final yaml file by inlining every <<* segment, so if we were to keep all the code in the config.yml itself then the config size would go over 4 MB and cause infra problems.

### **What is binary_run_in_docker for?**

So, CircleCI has several executor types: macos, machine, and docker are the ones we use. The 'machine' executor gives you two cores on some linux vm. The 'docker' executor gives you considerably more cores (nproc was 32 instead of 2 back when I tried in February). Since the dockers are faster, we try to run everything that we can in dockers. Thus

* linux build jobs use the docker executor. Running them on the docker executor was at least 2x faster than running them on the machine executor
* linux test jobs use the machine executor in order for them to properly interface with GPUs, since docker executors cannot execute with attached GPUs
* linux upload jobs use the machine executor. The upload jobs are so short that it doesn't really matter what they use
* linux smoke test jobs use the machine executor for the same reason as the linux test jobs

binary_run_in_docker.sh is a way to share the docker start-up code between the binary test jobs and the binary smoke test jobs

### **Why does binary_checkout also checkout pytorch? Why shouldn't it?**

We want all the nightly binary jobs to run on the exact same git commit, so we wrote our own checkout logic to ensure that the same commit was always picked. Later CircleCI changed that to use a single pytorch checkout and persist it through the workspace (they did this because our config file was too big, so they wanted to move a lot of the setup code into scripts, but the scripts needed the code repo to exist to be called, so they added a prereq step called 'setup' to checkout the code and persist the needed scripts to the workspace). The changes to the binary jobs were not properly tested, so they all broke because the pytorch code they expected no longer existed. We hotfixed the problem by adding the pytorch checkout back to binary_checkout, so now there are two checkouts of pytorch on the binary jobs. This problem still needs to be fixed, but it takes careful tracing of which code is being called where.

# Azure Pipelines structure of the binaries

TODO: fill in stuff

## How are the workflows structured?

TODO: fill in stuff

## How are the jobs structured?

TODO: fill in stuff

# Code structure of the binaries (circleci agnostic)

## Overview

The code that runs the binaries lives in two places, in the normal [github.com/pytorch/pytorch](http://github.com/pytorch/pytorch), but also in [github.com/pytorch/builder](http://github.com/pytorch/builder), which is a repo that defines how all the binaries are built. The relevant code is

```
# All code needed to set-up environments for build code to run in,
# but only code that is specific to the current CI system
pytorch/pytorch
- .circleci/                  # Folder that holds all circleci related stuff
  - config.yml                # GENERATED file that actually controls all circleci behavior
  - verbatim-sources          # Used to generate job/workflow sections in ^
  - scripts/                  # Code needed to prepare circleci environments for binary build scripts

- setup.py                    # Builds pytorch. This is wrapped in pytorch/builder
- cmake files                 # used in normal building of pytorch

# All code needed to prepare a binary build, given an environment
# with all the right variables/packages/paths.
pytorch/builder

# Given an installed binary and a proper python env, runs some checks
# to make sure the binary was built the proper way. Checks things like
# the library dependencies, symbols present, etc.
- check_binary.sh

# Given an installed binary, runs python tests to make sure everything
# is in order. These should be de-duped. Right now they both run smoke
# tests, but are called from different places. Usually just call some
# import statements, but also has overlap with check_binary.sh above
- run_tests.sh
- smoke_test.sh

# Folders that govern how packages are built. See paragraphs below

- conda/
  - build_pytorch.sh          # Entrypoint. Delegates to proper conda build folder
  - switch_cuda_version.sh    # Switches the active CUDA installation in Docker
  - pytorch-nightly/          # Build-folder
- manywheel/
  - build_cpu.sh              # Entrypoint for cpu builds
  - build.sh                  # Entrypoint for CUDA builds
  - build_common.sh           # Actual build script that ^^ call into
- wheel/
  - build_wheel.sh            # Entrypoint for wheel builds
- windows/
  - build_pytorch.bat         # Entrypoint for wheel builds on Windows
```

Every type of package has an entrypoint build script that handles all the important logic.

## Conda

Linux, MacOS and Windows use the same code flow for the conda builds.

Conda packages are built with conda-build, see https://conda.io/projects/conda-build/en/latest/resources/commands/conda-build.html

Basically, you pass `conda build` a build folder (pytorch-nightly/ above) that contains a build script and a meta.yaml. The meta.yaml specifies what python environment to build the package in and what dependencies the resulting package should have, and the build script gets called in the env to build the thing.
tl;dr on conda-build is

1. Creates a brand new conda environment, based off of deps in the meta.yaml
    1. Note that environment variables do not get passed into this build env unless they are specified in the meta.yaml
    2. If the build fails this environment will stick around. You can activate it for much easier debugging. The “General Python” section below explains what exactly a python “environment” is.
2. Calls build.sh in the environment
3. Copies the finished package to a new conda env, also specified by the meta.yaml
4. Runs some simple import tests (if specified in the meta.yaml)
5. Saves the finished package as a tarball

The build.sh we use is essentially a wrapper around `python setup.py build`, but it also manually copies in some of our dependent libraries into the resulting tarball and messes with some rpaths.
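
If you want to poke at this flow yourself, the invocation boils down to something like the following (a sketch; the flags and output folder shown here are illustrative, not copied from the CI scripts):

```sh
# Build the nightly conda recipe directly with conda-build
conda install -y conda-build
cd builder/conda
# pytorch-nightly/ holds the meta.yaml and build.sh described above
conda build pytorch-nightly/ --python 3.7 --output-folder /final_pkgs
```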

The entrypoint file `builder/conda/build_conda.sh` is complicated because

* It works for Linux, MacOS and Windows
    * The mac builds used to create their own environments, since they all used to be on the same machine. There’s now a lot of extra logic to handle conda envs. This extra machinery could be removed
* It used to handle testing too, which adds more logic messing with python environments too. This extra machinery could be removed.

## Manywheels (linux pip and libtorch packages)

Manywheels are pip packages for linux distros. Note that these manywheels are not actually manylinux compliant.

`builder/manywheel/build_cpu.sh` and `builder/manywheel/build.sh` (for CUDA builds) just set different env vars and then call into `builder/manywheel/build_common.sh`

The entrypoint file `builder/manywheel/build_common.sh` is really complicated because

* This used to handle building for several different python versions at the same time. The loops have been removed, but there are still unnecessary folders and movements here and there.
    * The script is never used this way anymore. This extra machinery could be removed.
* This used to handle testing the pip packages too. This is why there’s testing code at the end that messes with python installations and stuff
    * The script is never used this way anymore. This extra machinery could be removed.
* This also builds libtorch packages
    * This should really be separate. libtorch packages are c++ only and have no python. They should not share infra with all the python specific stuff in this file.
* There is a lot of messing with rpaths. This is necessary, but could be made much simpler if the above issues were fixed.

## Wheels (MacOS pip and libtorch packages)

The entrypoint file `builder/wheel/build_wheel.sh` is complicated because

* The mac builds used to all run on one machine (we didn’t have autoscaling mac machines until CircleCI). So this script handled siloing itself by setting up and tearing down its build env and siloing itself into its own build directory.
    * The script is never used this way anymore. This extra machinery could be removed.
* This also builds libtorch packages
    * Ditto the comment above. This should definitely be separated out.

Note that the MacOS Python wheels are still built in conda environments. Some of the dependencies present during build also come from conda.

## Windows Wheels (Windows pip and libtorch packages)

The entrypoint file `builder/windows/build_pytorch.bat` is complicated because

* This used to handle building for several different python versions at the same time. This is why there are loops everywhere
    * The script is never used this way anymore. This extra machinery could be removed.
* This used to handle testing the pip packages too. This is why there’s testing code at the end that messes with python installations and stuff
    * The script is never used this way anymore. This extra machinery could be removed.
* This also builds libtorch packages
    * This should really be separate. libtorch packages are c++ only and have no python. They should not share infra with all the python specific stuff in this file.

Note that the Windows Python wheels are still built in conda environments. Some of the dependencies present during build also come from conda.

## General notes

### Note on run_tests.sh, smoke_test.sh, and check_binary.sh

* These should all be consolidated
* These must run on all OS types: MacOS, Linux, and Windows
* These all run smoke tests at the moment. They inspect the packages some, maybe run a few import statements (a sketch of the kind of check follows this list). They DO NOT run the python tests nor the cpp tests. The idea is that python tests on master and PR merges will catch all breakages. All these tests have to do is make sure the special binary machinery didn’t mess anything up.
* There are separate run_tests.sh and smoke_test.sh because one used to be called by the smoke jobs and one used to be called by the binary test jobs (see circleci structure section above). This is still true actually, but these could be united into a single script that runs these checks, given an installed pytorch package.
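
For a feel of what "smoke test" means here, the checks are of roughly this flavor (an illustrative sketch, not the actual contents of those scripts):

```sh
# Minimal import-level checks against an installed package
python -c "import torch; print(torch.__version__)"
python -c "import torch; print(torch.rand(2, 3).sum())"
# CUDA builds would additionally check something like:
# python -c "import torch; print(torch.cuda.is_available())"
```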

### Note on libtorch

Libtorch packages are built in the wheel build scripts: manywheel/build_*.sh for linux and build_wheel.sh for mac. There are several things wrong with this

* It’s confusing. Most of those scripts deal with python specifics.
* The extra conditionals everywhere severely complicate the wheel build scripts
* The process for building libtorch is different from the official instructions (a plain call to cmake, or a call to a script)

### Note on docker images / Dockerfiles

All linux builds occur in docker images. The docker images are

* pytorch/conda-cuda
    * Has ALL CUDA versions installed. The script pytorch/builder/conda/switch_cuda_version.sh sets /usr/local/cuda to a symlink to e.g. /usr/local/cuda-10.0 to enable different CUDA builds (see the sketch after this list)
    * Also used for cpu builds
* pytorch/manylinux-cuda90
* pytorch/manylinux-cuda100
    * Also used for cpu builds
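
As a rough illustration of the CUDA switch inside pytorch/conda-cuda (whether the script is sourced or executed, and its exact argument format, are assumptions here; check the script itself):

```sh
# Inside a container started from pytorch/conda-cuda, with the builder
# repo mounted at /builder as in the local-build example further below
source /builder/conda/switch_cuda_version.sh 10.0
ls -l /usr/local/cuda   # should now point at /usr/local/cuda-10.0
```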

The Dockerfiles are available in pytorch/builder, but there is no circleci job or script to build these docker images, and they cannot be run locally (unless you have the correct local packages/paths). Only Soumith can build them right now.

### General Python

* This is still a good explanation of python installations: https://caffe2.ai/docs/faq.html#why-do-i-get-import-errors-in-python-when-i-try-to-use-caffe2

# How to manually rebuild the binaries

tl;dr make a PR that looks like https://github.com/pytorch/pytorch/pull/21159

Sometimes we want to push a change to master and then rebuild all of today's binaries after that change. As of May 30, 2019 there isn't a way to manually run a workflow in the UI. You can manually re-run a workflow, but it will use the exact same git commits as the first run and will not include any changes. So we have to make a PR and then force circleci to run the binary workflow instead of the normal tests. The above PR is an example of how to do this; essentially you copy-paste the binarybuilds workflow steps into the default workflow steps. If you need to point the builder repo to a different commit then you'd need to change https://github.com/pytorch/pytorch/blob/master/.circleci/scripts/binary_checkout.sh#L42-L45 to checkout what you want.

## How to test changes to the binaries via .circleci

Writing PRs that test the binaries is annoying, since the default circleci jobs that run on PRs are not the jobs that you want to run. Likely, changes to the binaries will touch something under .circleci/ and require that .circleci/config.yml be regenerated (.circleci/config.yml controls all .circleci behavior, and is generated using `.circleci/regenerate.sh` in python 3.7). But you also need to manually hardcode the binary jobs that you want to test into the .circleci/config.yml workflow, so you should actually make at least two commits, one for your changes and one to temporarily hardcode jobs. See https://github.com/pytorch/pytorch/pull/22928 as an example of how to do this.

```sh
# Make your changes
touch .circleci/verbatim-sources/nightly-binary-build-defaults.yml

# Regenerate the yaml, has to be in python 3.7
.circleci/regenerate.sh

# Make a commit
git add .circleci *
git commit -m "My real changes"
git push origin my_branch

# Now hardcode the jobs that you want in the .circleci/config.yml workflows section
# Also eliminate ensure-consistency and should_run_job checks
# e.g. https://github.com/pytorch/pytorch/commit/2b3344bfed8772fe86e5210cc4ee915dee42b32d

# Make a commit you won't keep
git add .circleci
git commit -m "[DO NOT LAND] testing binaries for above changes"
git push origin my_branch

# Now you need to make some changes to the first commit.
git rebase -i HEAD~2 # mark the first commit as 'edit'

# Make the changes
touch .circleci/verbatim-sources/nightly-binary-build-defaults.yml
.circleci/regenerate.sh

# Amend the commit and continue the rebase
git add .circleci
git commit --amend
git rebase --continue

# Update the PR, need to force since the commits are different now
git push origin my_branch --force
```

The advantage of this flow is that you can make new changes to the base commit and regenerate the .circleci without having to re-write which binary jobs you want to test on. The downside is that all updates will be force pushes.

## How to build a binary locally

### Linux

You can easily build Linux binaries locally using docker.

```sh
# Run the docker
# Use the correct docker image, pytorch/conda-cuda used here as an example
#
# -v path/to/foo:path/to/bar makes path/to/foo on your local machine (the
# machine that you're running the command on) accessible to the docker
# container at path/to/bar. So if you then run `touch path/to/bar/baz`
# in the docker container then you will see path/to/foo/baz on your local
# machine. You could also clone the pytorch and builder repos in the docker.
#
# If you know how, add ccache as a volume too and speed up everything
docker run \
    -v your/pytorch/repo:/pytorch \
    -v your/builder/repo:/builder \
    -v where/you/want/packages/to/appear:/final_pkgs \
    -it pytorch/conda-cuda /bin/bash

# Export whatever variables are important to you. All variables that you'd
# possibly need are in .circleci/scripts/binary_populate_env.sh
# You should probably always export at least these 3 variables
export PACKAGE_TYPE=conda
export DESIRED_PYTHON=3.6
export DESIRED_CUDA=cpu

# Call the entrypoint
# `|& tee foo.log` just copies all stdout and stderr output to foo.log
# The builds generate lots of output so you probably need this when
# building locally.
/builder/conda/build_pytorch.sh |& tee build_output.log
```

**Building CUDA binaries on docker**

You can build CUDA binaries on CPU-only machines, but you can only run CUDA binaries on CUDA machines. This means that you can build a CUDA binary on a docker on your laptop if you so choose (though it’s gonna take a long time).

For Facebook employees, ask about beefy machines that have docker support and use those instead of your laptop; it will be 5x as fast.

### MacOS

There’s no easy way to generate reproducible hermetic MacOS environments. If you have a Mac laptop then you can try emulating the .circleci environments as much as possible, but you probably have packages in /usr/local/, possibly installed by brew, that will probably interfere with the build. If you’re trying to repro an error on a Mac build in .circleci and you can’t seem to repro locally, then my best advice is actually to iterate on .circleci :/

But if you want to try, then I’d recommend

```sh
# Create a new terminal
# Clear your LD_LIBRARY_PATH and trim as much out of your PATH as you
# know how to do

# Install a new miniconda
# First remove any other python or conda installation from your PATH
# Always install miniconda 3, even if building for Python <3
new_conda="$HOME/my_new_conda"
conda_sh="$HOME/install_miniconda.sh"
curl -o "$conda_sh" https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x "$conda_sh"
"$conda_sh" -b -p "$new_conda"
rm -f "$conda_sh"
export PATH="$new_conda/bin:$PATH"

# Create a clean python env
# All MacOS builds use conda to manage the python env and dependencies
# that are built with, even the pip packages
conda create -yn binary python=2.7
source "$new_conda/etc/profile.d/conda.sh"
conda activate binary

# Export whatever variables are important to you. All variables that you'd
# possibly need are in .circleci/scripts/binary_populate_env.sh
# You should probably always export at least these 3 variables
export PACKAGE_TYPE=conda
export DESIRED_PYTHON=3.6
export DESIRED_CUDA=cpu

# Call the entrypoint you want
path/to/builder/wheel/build_wheel.sh
```

N.B. installing a brand new miniconda is important. This has to do with how conda installations work. See the “General Python” section above, but the tl;dr is that

1. You make the ‘conda’ command accessible by prepending `path/to/conda_root/bin` to your PATH.
2. You make a new env and activate it, which then also gets prepended to your PATH. Now you have `path/to/conda_root/envs/new_env/bin:path/to/conda_root/bin:$PATH`
3. Now say you (or some code that you ran) call a python executable `foo`
    1. If you installed `foo` in `new_env`, then `path/to/conda_root/envs/new_env/bin/foo` will get called, as expected.
    2. But if you forgot to install `foo` in `new_env` but happened to previously install it in your root conda env (called ‘base’), then unix/linux will still find `path/to/conda_root/bin/foo`. This is dangerous, since `foo` can be a different version than you want; `foo` can even be for an incompatible python version!

Newer conda versions and proper python hygiene can prevent this, but just install a new miniconda to be safe.
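
A quick way to see this resolution order for yourself (`foo` is a stand-in for any executable, e.g. python):

```sh
# Show every match for foo on your PATH, in resolution order
which -a foo
# Show the PATH entries themselves, one per line
echo "$PATH" | tr ':' '\n'
```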

### Windows

TODO: fill in
@ -30,7 +30,48 @@ def get_processor_arch_name(gpu_version):
|
||||
"cu" + gpu_version.strip("cuda") if gpu_version.startswith("cuda") else gpu_version
|
||||
)
|
||||
|
||||
LINUX_PACKAGE_VARIANTS = OrderedDict(
|
||||
manywheel=[
|
||||
"3.6m",
|
||||
"3.7m",
|
||||
"3.8m",
|
||||
"3.9m"
|
||||
],
|
||||
conda=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
libtorch=[
|
||||
"3.7m",
|
||||
],
|
||||
)
|
||||
|
||||
CONFIG_TREE_DATA = OrderedDict(
|
||||
linux=(dimensions.GPU_VERSIONS, LINUX_PACKAGE_VARIANTS),
|
||||
macos=([None], OrderedDict(
|
||||
wheel=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
conda=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
libtorch=[
|
||||
"3.7",
|
||||
],
|
||||
)),
|
||||
macos_arm64=([None], OrderedDict(
|
||||
wheel=[
|
||||
"3.8",
|
||||
"3.9",
|
||||
],
|
||||
conda=[
|
||||
"3.8",
|
||||
"3.9",
|
||||
],
|
||||
)),
|
||||
windows=(
|
||||
[v for v in dimensions.GPU_VERSIONS if v not in dimensions.ROCM_VERSION_LABELS],
|
||||
OrderedDict(
|
||||
wheel=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
conda=dimensions.STANDARD_PYTHON_VERSIONS,
|
||||
libtorch=[
|
||||
"3.7",
|
||||
],
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
# GCC config variants:
|
||||
|
||||
@ -2,13 +2,14 @@ PHASES = ["build", "test"]
|
||||
|
||||
CUDA_VERSIONS = [
|
||||
"102",
|
||||
"111",
|
||||
"113",
|
||||
"116",
|
||||
]
|
||||
|
||||
ROCM_VERSIONS = [
|
||||
"4.3.1",
|
||||
"4.5.2",
|
||||
"4.0.1",
|
||||
"4.1",
|
||||
"4.2",
|
||||
]
|
||||
|
||||
ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]
|
||||
@ -16,8 +17,8 @@ ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]
|
||||
GPU_VERSIONS = [None] + ["cuda" + v for v in CUDA_VERSIONS] + ROCM_VERSION_LABELS
|
||||
|
||||
STANDARD_PYTHON_VERSIONS = [
|
||||
"3.6",
|
||||
"3.7",
|
||||
"3.8",
|
||||
"3.9",
|
||||
"3.10"
|
||||
"3.9"
|
||||
]
|
||||
|
||||
@ -1,7 +1,70 @@
|
||||
from cimodel.lib.conf_tree import ConfigNode
|
||||
from cimodel.lib.conf_tree import ConfigNode, X, XImportant
|
||||
|
||||
|
||||
CONFIG_TREE_DATA = [
|
||||
("xenial", [
|
||||
("gcc", [
|
||||
("5.4", [ # All this subtree rebases to master and then build
|
||||
("3.6", [
|
||||
("important", [X(True)]),
|
||||
]),
|
||||
]),
|
||||
# TODO: bring back libtorch test
|
||||
("7", [X("3.6")]),
|
||||
]),
|
||||
("clang", [
|
||||
("7", [
|
||||
("3.6", [
|
||||
("asan", [
|
||||
(True, [
|
||||
("shard_test", [XImportant(True)]),
|
||||
]),
|
||||
]),
|
||||
("onnx", [XImportant(True)]),
|
||||
]),
|
||||
]),
|
||||
]),
|
||||
("cuda", [
|
||||
("10.2", [
|
||||
("3.6", [
|
||||
# Build are needed for slow_gradcheck
|
||||
('build_only', [X(True)]),
|
||||
("slow_gradcheck", [
|
||||
# If you update this slow gradcheck, you should
|
||||
# also update docker_definitions.py to make sure
|
||||
# the docker image match the config used here
|
||||
(True, [
|
||||
('shard_test', [XImportant(True)]),
|
||||
]),
|
||||
]),
|
||||
# UNCOMMENT THE BELOW TO REENABLE LIBTORCH
|
||||
# ("libtorch", [
|
||||
# (True, [
|
||||
# ('build_only', [X(True)]),
|
||||
# ]),
|
||||
# ]),
|
||||
]),
|
||||
]),
|
||||
]),
|
||||
]),
|
||||
("bionic", [
|
||||
("clang", [
|
||||
("9", [
|
||||
("3.6", [
|
||||
("xla", [XImportant(True)]),
|
||||
("vulkan", [XImportant(True)]),
|
||||
]),
|
||||
]),
|
||||
]),
|
||||
# @jithunnair-amd believes Jenkins builds are sufficient
|
||||
# ("rocm", [
|
||||
# ("3.9", [
|
||||
# ("3.6", [
|
||||
# ('build_only', [XImportant(True)]),
|
||||
# ]),
|
||||
# ]),
|
||||
# ]),
|
||||
]),
|
||||
]
|
||||
|
||||
|
||||
@ -71,10 +134,10 @@ class ExperimentalFeatureConfigNode(TreeConfigNode):
|
||||
next_nodes = {
|
||||
"asan": AsanConfigNode,
|
||||
"xla": XlaConfigNode,
|
||||
"mps": MPSConfigNode,
|
||||
"mlc": MLCConfigNode,
|
||||
"vulkan": VulkanConfigNode,
|
||||
"parallel_tbb": ParallelTBBConfigNode,
|
||||
"crossref": CrossRefConfigNode,
|
||||
"noarch": NoarchConfigNode,
|
||||
"parallel_native": ParallelNativeConfigNode,
|
||||
"onnx": ONNXConfigNode,
|
||||
"libtorch": LibTorchConfigNode,
|
||||
@ -82,6 +145,7 @@ class ExperimentalFeatureConfigNode(TreeConfigNode):
|
||||
"build_only": BuildOnlyConfigNode,
|
||||
"shard_test": ShardTestConfigNode,
|
||||
"cuda_gcc_override": CudaGccOverrideConfigNode,
|
||||
"coverage": CoverageConfigNode,
|
||||
"pure_torch": PureTorchConfigNode,
|
||||
"slow_gradcheck": SlowGradcheckConfigNode,
|
||||
}
|
||||
@ -116,12 +180,12 @@ class XlaConfigNode(TreeConfigNode):
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
|
||||
class MPSConfigNode(TreeConfigNode):
|
||||
class MLCConfigNode(TreeConfigNode):
|
||||
def modify_label(self, label):
|
||||
return "MPS=" + str(label)
|
||||
return "MLC=" + str(label)
|
||||
|
||||
def init2(self, node_name):
|
||||
self.props["is_mps"] = node_name
|
||||
self.props["is_mlc"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
@ -171,9 +235,9 @@ class ParallelTBBConfigNode(TreeConfigNode):
|
||||
return ImportantConfigNode
|
||||
|
||||
|
||||
class CrossRefConfigNode(TreeConfigNode):
|
||||
class NoarchConfigNode(TreeConfigNode):
|
||||
def init2(self, node_name):
|
||||
self.props["is_crossref"] = node_name
|
||||
self.props["is_noarch"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ImportantConfigNode
|
||||
@ -225,6 +289,14 @@ class ShardTestConfigNode(TreeConfigNode):
|
||||
return ImportantConfigNode
|
||||
|
||||
|
||||
class CoverageConfigNode(TreeConfigNode):
|
||||
def init2(self, node_name):
|
||||
self.props["is_coverage"] = node_name
|
||||
|
||||
def child_constructor(self):
|
||||
return ExperimentalFeatureConfigNode
|
||||
|
||||
|
||||
class ImportantConfigNode(TreeConfigNode):
|
||||
def modify_label(self, label):
|
||||
return "IMPORTANT=" + str(label)
|
||||
|
||||
@ -185,7 +185,7 @@ def gen_docs_configs(xenial_parent_config):
|
||||
HiddenConf(
|
||||
"pytorch_python_doc_build",
|
||||
parent_build=xenial_parent_config,
|
||||
filters=gen_filter_dict(branches_list=["master", "main", "nightly"],
|
||||
filters=gen_filter_dict(branches_list=["master", "nightly"],
|
||||
tags_list=RC_PATTERN),
|
||||
)
|
||||
)
|
||||
@ -201,7 +201,7 @@ def gen_docs_configs(xenial_parent_config):
|
||||
HiddenConf(
|
||||
"pytorch_cpp_doc_build",
|
||||
parent_build=xenial_parent_config,
|
||||
filters=gen_filter_dict(branches_list=["master", "main", "nightly"],
|
||||
filters=gen_filter_dict(branches_list=["master", "nightly"],
|
||||
tags_list=RC_PATTERN),
|
||||
)
|
||||
)
|
||||
@ -239,7 +239,8 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
compiler_version = fc.find_prop("compiler_version")
|
||||
is_xla = fc.find_prop("is_xla") or False
|
||||
is_asan = fc.find_prop("is_asan") or False
|
||||
is_crossref = fc.find_prop("is_crossref") or False
|
||||
is_coverage = fc.find_prop("is_coverage") or False
|
||||
is_noarch = fc.find_prop("is_noarch") or False
|
||||
is_onnx = fc.find_prop("is_onnx") or False
|
||||
is_pure_torch = fc.find_prop("is_pure_torch") or False
|
||||
is_vulkan = fc.find_prop("is_vulkan") or False
|
||||
@ -283,8 +284,12 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
python_version = fc.find_prop("pyver")
|
||||
parms_list[0] = fc.find_prop("abbreviated_pyver")
|
||||
|
||||
if is_crossref:
|
||||
parms_list_ignored_for_docker_image.append("crossref")
|
||||
if is_coverage:
|
||||
parms_list_ignored_for_docker_image.append("coverage")
|
||||
python_version = fc.find_prop("pyver")
|
||||
|
||||
if is_noarch:
|
||||
parms_list_ignored_for_docker_image.append("noarch")
|
||||
|
||||
if is_onnx:
|
||||
parms_list.append("onnx")
|
||||
@ -334,12 +339,13 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
build_only=build_only,
|
||||
)
|
||||
|
||||
# run docs builds on "pytorch-linux-xenial-py3.7-gcc5.4". Docs builds
|
||||
# run docs builds on "pytorch-linux-xenial-py3.6-gcc5.4". Docs builds
|
||||
# should run on a CPU-only build that runs on all PRs.
|
||||
# XXX should this be updated to a more modern build?
|
||||
# XXX should this be updated to a more modern build? Projects are
|
||||
# beginning to drop python3.6
|
||||
if (
|
||||
distro_name == "xenial"
|
||||
and fc.find_prop("pyver") == "3.7"
|
||||
and fc.find_prop("pyver") == "3.6"
|
||||
and cuda_version is None
|
||||
and parallel_backend is None
|
||||
and not is_vulkan
|
||||
@ -351,6 +357,28 @@ def instantiate_configs(only_slow_gradcheck):
|
||||
tags_list=RC_PATTERN)
|
||||
c.dependent_tests = gen_docs_configs(c)
|
||||
|
||||
if (
|
||||
compiler_name != "clang"
|
||||
and not rocm_version
|
||||
and not is_libtorch
|
||||
and not is_vulkan
|
||||
and not is_pure_torch
|
||||
and not is_noarch
|
||||
and not is_slow_gradcheck
|
||||
and not only_slow_gradcheck
|
||||
and not build_only
|
||||
):
|
||||
distributed_test = Conf(
|
||||
c.gen_build_name("") + "distributed",
|
||||
[],
|
||||
is_xla=False,
|
||||
restrict_phases=["test"],
|
||||
is_libtorch=False,
|
||||
is_important=True,
|
||||
parent_build=c,
|
||||
)
|
||||
c.dependent_tests.append(distributed_test)
|
||||
|
||||
config_list.append(c)
|
||||
|
||||
return config_list
|
||||
|
||||
119
.circleci/cimodel/data/simple/android_definitions.py
Normal file
@ -0,0 +1,119 @@
|
||||
import cimodel.data.simple.util.branch_filters as branch_filters
|
||||
from cimodel.data.simple.util.docker_constants import (
|
||||
DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK
|
||||
)
|
||||
import cimodel.lib.miniutils as miniutils
|
||||
|
||||
|
||||
class AndroidJob:
|
||||
def __init__(self,
|
||||
variant,
|
||||
template_name,
|
||||
is_master_only=True):
|
||||
|
||||
self.variant = variant
|
||||
self.template_name = template_name
|
||||
self.is_master_only = is_master_only
|
||||
|
||||
def gen_tree(self):
|
||||
|
||||
base_name_parts = [
|
||||
"pytorch",
|
||||
"linux",
|
||||
"xenial",
|
||||
"py3",
|
||||
"clang5",
|
||||
"android",
|
||||
"ndk",
|
||||
"r19c",
|
||||
] + self.variant + [
|
||||
"build",
|
||||
]
|
||||
|
||||
full_job_name = "_".join(base_name_parts)
|
||||
build_env_name = "-".join(base_name_parts)
|
||||
|
||||
props_dict = {
|
||||
"name": full_job_name,
|
||||
"build_environment": "\"{}\"".format(build_env_name),
|
||||
"docker_image": "\"{}\"".format(DOCKER_IMAGE_NDK),
|
||||
"requires": [DOCKER_REQUIREMENT_NDK]
|
||||
}
|
||||
|
||||
if self.is_master_only:
|
||||
props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
|
||||
|
||||
return [{self.template_name: props_dict}]
|
||||
|
||||
|
||||
class AndroidGradleJob:
|
||||
def __init__(self,
|
||||
job_name,
|
||||
template_name,
|
||||
dependencies,
|
||||
is_master_only=True,
|
||||
is_pr_only=False,
|
||||
extra_props=tuple()):
|
||||
|
||||
self.job_name = job_name
|
||||
self.template_name = template_name
|
||||
self.dependencies = dependencies
|
||||
self.is_master_only = is_master_only
|
||||
self.is_pr_only = is_pr_only
|
||||
self.extra_props = dict(extra_props)
|
||||
|
||||
def gen_tree(self):
|
||||
|
||||
props_dict = {
|
||||
"name": self.job_name,
|
||||
"requires": self.dependencies,
|
||||
}
|
||||
|
||||
if self.is_master_only:
|
||||
props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
|
||||
elif self.is_pr_only:
|
||||
props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.PR_BRANCH_LIST)
|
||||
if self.extra_props:
|
||||
props_dict.update(self.extra_props)
|
||||
|
||||
return [{self.template_name: props_dict}]
|
||||
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
AndroidJob(["x86_32"], "pytorch_linux_build", is_master_only=False),
|
||||
AndroidJob(["x86_64"], "pytorch_linux_build"),
|
||||
AndroidJob(["arm", "v7a"], "pytorch_linux_build"),
|
||||
AndroidJob(["arm", "v8a"], "pytorch_linux_build"),
|
||||
AndroidGradleJob(
|
||||
"pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build-x86_32",
|
||||
"pytorch_android_gradle_build-x86_32",
|
||||
["pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build"],
|
||||
is_master_only=False,
|
||||
is_pr_only=True),
|
||||
AndroidGradleJob(
|
||||
"pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single",
|
||||
"pytorch_android_gradle_custom_build_single",
|
||||
[DOCKER_REQUIREMENT_NDK],
|
||||
is_master_only=False,
|
||||
is_pr_only=True),
|
||||
AndroidGradleJob(
|
||||
"pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single-full-jit",
|
||||
"pytorch_android_gradle_custom_build_single",
|
||||
[DOCKER_REQUIREMENT_NDK],
|
||||
is_master_only=False,
|
||||
is_pr_only=True,
|
||||
extra_props=tuple({
|
||||
"lite_interpreter": miniutils.quote(str(int(False)))
|
||||
}.items())),
|
||||
AndroidGradleJob(
|
||||
"pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build",
|
||||
"pytorch_android_gradle_build",
|
||||
["pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build",
|
||||
"pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build",
|
||||
"pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build",
|
||||
"pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build"]),
|
||||
]
|
||||
|
||||
|
||||
def get_workflow_jobs():
|
||||
return [item.gen_tree() for item in WORKFLOW_DATA]
|
||||
69
.circleci/cimodel/data/simple/bazel_definitions.py
Normal file
@ -0,0 +1,69 @@
|
||||
from cimodel.data.simple.util.docker_constants import (
|
||||
DOCKER_IMAGE_GCC7,
|
||||
DOCKER_REQUIREMENT_GCC7
|
||||
)
|
||||
|
||||
|
||||
def gen_job_name(phase):
|
||||
job_name_parts = [
|
||||
"pytorch",
|
||||
"bazel",
|
||||
phase,
|
||||
]
|
||||
|
||||
return "_".join(job_name_parts)
|
||||
|
||||
|
||||
class BazelJob:
|
||||
def __init__(self, phase, extra_props=None):
|
||||
self.phase = phase
|
||||
self.extra_props = extra_props or {}
|
||||
|
||||
def gen_tree(self):
|
||||
|
||||
template_parts = [
|
||||
"pytorch",
|
||||
"linux",
|
||||
"bazel",
|
||||
self.phase,
|
||||
]
|
||||
|
||||
build_env_parts = [
|
||||
"pytorch",
|
||||
"linux",
|
||||
"xenial",
|
||||
"py3.6",
|
||||
"gcc7",
|
||||
"bazel",
|
||||
self.phase,
|
||||
]
|
||||
|
||||
full_job_name = gen_job_name(self.phase)
|
||||
build_env_name = "-".join(build_env_parts)
|
||||
|
||||
extra_requires = (
|
||||
[gen_job_name("build")] if self.phase == "test" else
|
||||
[DOCKER_REQUIREMENT_GCC7]
|
||||
)
|
||||
|
||||
props_dict = {
|
||||
"build_environment": build_env_name,
|
||||
"docker_image": DOCKER_IMAGE_GCC7,
|
||||
"name": full_job_name,
|
||||
"requires": extra_requires,
|
||||
}
|
||||
|
||||
props_dict.update(self.extra_props)
|
||||
|
||||
template_name = "_".join(template_parts)
|
||||
return [{template_name: props_dict}]
|
||||
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
BazelJob("build", {"resource_class": "large"}),
|
||||
BazelJob("test"),
|
||||
]
|
||||
|
||||
|
||||
def get_workflow_jobs():
|
||||
return [item.gen_tree() for item in WORKFLOW_DATA]
|
||||
193
.circleci/cimodel/data/simple/binary_smoketest.py
Normal file
@ -0,0 +1,193 @@
|
||||
"""
|
||||
TODO: Refactor circleci/cimodel/data/binary_build_data.py to generate this file
|
||||
instead of doing one offs here
|
||||
Binary builds (subset, to smoke test that they'll work)
|
||||
|
||||
NB: If you modify this file, you need to also modify
|
||||
the binary_and_smoke_tests_on_pr variable in
|
||||
pytorch-ci-hud to adjust the allowed build list
|
||||
at https://github.com/ezyang/pytorch-ci-hud/blob/master/src/BuildHistoryDisplay.js
|
||||
|
||||
Note:
|
||||
This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710
|
||||
- binary_linux_conda_3_6_cu90_devtoolset7_build
|
||||
- binary_linux_conda_3_6_cu90_devtoolset7_test
|
||||
|
||||
TODO
|
||||
we should test a libtorch cuda build, but they take too long
|
||||
- binary_linux_libtorch_3_6m_cu90_devtoolset7_static-without-deps_build
|
||||
"""
|
||||
|
||||
import cimodel.lib.miniutils as miniutils
|
||||
import cimodel.data.simple.util.branch_filters
|
||||
|
||||
|
||||
class SmoketestJob:
|
||||
def __init__(self,
|
||||
template_name,
|
||||
build_env_parts,
|
||||
docker_image,
|
||||
job_name,
|
||||
is_master_only=False,
|
||||
requires=None,
|
||||
has_libtorch_variant=False,
|
||||
extra_props=None):
|
||||
|
||||
self.template_name = template_name
|
||||
self.build_env_parts = build_env_parts
|
||||
self.docker_image = docker_image
|
||||
self.job_name = job_name
|
||||
self.is_master_only = is_master_only
|
||||
self.requires = requires or []
|
||||
self.has_libtorch_variant = has_libtorch_variant
|
||||
self.extra_props = extra_props or {}
|
||||
|
||||
def gen_tree(self):
|
||||
|
||||
props_dict = {
|
||||
"build_environment": " ".join(self.build_env_parts),
|
||||
"name": self.job_name,
|
||||
"requires": self.requires,
|
||||
}
|
||||
|
||||
if self.docker_image:
|
||||
props_dict["docker_image"] = self.docker_image
|
||||
|
||||
if self.is_master_only:
|
||||
props_dict["filters"] = cimodel.data.simple.util.branch_filters.gen_filter_dict()
|
||||
|
||||
if self.has_libtorch_variant:
|
||||
props_dict["libtorch_variant"] = "shared-with-deps"
|
||||
|
||||
props_dict.update(self.extra_props)
|
||||
|
||||
return [{self.template_name: props_dict}]
|
||||
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
SmoketestJob(
|
||||
"binary_linux_build",
|
||||
["manywheel", "3.7m", "cu102", "devtoolset7"],
|
||||
"pytorch/manylinux-cuda102",
|
||||
"binary_linux_manywheel_3_7m_cu102_devtoolset7_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_linux_build",
|
||||
["libtorch", "3.7m", "cpu", "devtoolset7"],
|
||||
"pytorch/manylinux-cuda102",
|
||||
"binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build",
|
||||
is_master_only=True,
|
||||
has_libtorch_variant=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_linux_build",
|
||||
["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"],
|
||||
"pytorch/pytorch-binary-docker-image-ubuntu16.04:latest",
|
||||
"binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build",
|
||||
is_master_only=False,
|
||||
has_libtorch_variant=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_mac_build",
|
||||
["wheel", "3.7", "cpu"],
|
||||
None,
|
||||
"binary_macos_wheel_3_7_cpu_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
# This job has an average run time of 3 hours o.O
|
||||
# Now only running this on master to reduce overhead
|
||||
SmoketestJob(
|
||||
"binary_mac_build",
|
||||
["libtorch", "3.7", "cpu"],
|
||||
None,
|
||||
"binary_macos_libtorch_3_7_cpu_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_build",
|
||||
["libtorch", "3.7", "cpu", "debug"],
|
||||
None,
|
||||
"binary_windows_libtorch_3_7_cpu_debug_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_build",
|
||||
["libtorch", "3.7", "cpu", "release"],
|
||||
None,
|
||||
"binary_windows_libtorch_3_7_cpu_release_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_build",
|
||||
["wheel", "3.7", "cu102"],
|
||||
None,
|
||||
"binary_windows_wheel_3_7_cu102_build",
|
||||
is_master_only=True,
|
||||
),
|
||||
|
||||
SmoketestJob(
|
||||
"binary_windows_test",
|
||||
["libtorch", "3.7", "cpu", "debug"],
|
||||
None,
|
||||
"binary_windows_libtorch_3_7_cpu_debug_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_windows_libtorch_3_7_cpu_debug_build"],
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_test",
|
||||
["libtorch", "3.7", "cpu", "release"],
|
||||
None,
|
||||
"binary_windows_libtorch_3_7_cpu_release_test",
|
||||
is_master_only=False,
|
||||
requires=["binary_windows_libtorch_3_7_cpu_release_build"],
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_windows_test",
|
||||
["wheel", "3.7", "cu102"],
|
||||
None,
|
||||
"binary_windows_wheel_3_7_cu102_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_windows_wheel_3_7_cu102_build"],
|
||||
extra_props={
|
||||
"executor": "windows-with-nvidia-gpu",
|
||||
},
|
||||
),
|
||||
|
||||
|
||||
|
||||
SmoketestJob(
|
||||
"binary_linux_test",
|
||||
["manywheel", "3.7m", "cu102", "devtoolset7"],
|
||||
"pytorch/manylinux-cuda102",
|
||||
"binary_linux_manywheel_3_7m_cu102_devtoolset7_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_linux_manywheel_3_7m_cu102_devtoolset7_build"],
|
||||
extra_props={
|
||||
"resource_class": "gpu.medium",
|
||||
"use_cuda_docker_runtime": miniutils.quote((str(1))),
|
||||
},
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_linux_test",
|
||||
["libtorch", "3.7m", "cpu", "devtoolset7"],
|
||||
"pytorch/manylinux-cuda102",
|
||||
"binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build"],
|
||||
has_libtorch_variant=True,
|
||||
),
|
||||
SmoketestJob(
|
||||
"binary_linux_test",
|
||||
["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"],
|
||||
"pytorch/pytorch-binary-docker-image-ubuntu16.04:latest",
|
||||
"binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test",
|
||||
is_master_only=True,
|
||||
requires=["binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build"],
|
||||
has_libtorch_variant=True,
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def get_workflow_jobs():
|
||||
return [item.gen_tree() for item in WORKFLOW_DATA]
|
||||
@ -4,8 +4,27 @@ from cimodel.lib.miniutils import quote
|
||||
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN
|
||||
|
||||
|
||||
# NOTE: All hardcoded docker image builds have been migrated to GHA
|
||||
# TODO: make this generated from a matrix rather than just a static list
|
||||
IMAGE_NAMES = [
|
||||
"pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7",
|
||||
"pytorch-linux-bionic-py3.6-clang9",
|
||||
"pytorch-linux-bionic-cuda10.2-cudnn7-py3.6-clang9",
|
||||
"pytorch-linux-bionic-py3.8-gcc9",
|
||||
"pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7",
|
||||
"pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
|
||||
"pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7",
|
||||
"pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
|
||||
"pytorch-linux-xenial-py3-clang5-asan",
|
||||
"pytorch-linux-xenial-py3-clang7-asan",
|
||||
"pytorch-linux-xenial-py3-clang7-onnx",
|
||||
"pytorch-linux-xenial-py3.8",
|
||||
"pytorch-linux-xenial-py3.6-clang7",
|
||||
"pytorch-linux-xenial-py3.6-gcc5.4", # this one is used in doc builds
|
||||
"pytorch-linux-xenial-py3.6-gcc7.2",
|
||||
"pytorch-linux-xenial-py3.6-gcc7",
|
||||
"pytorch-linux-bionic-rocm4.1-py3.6",
|
||||
"pytorch-linux-bionic-rocm4.2-py3.6",
|
||||
"pytorch-linux-bionic-rocm4.3.1-py3.6",
|
||||
]
|
||||
|
||||
# This entry should be an element from the list above
|
||||
@ -13,12 +32,10 @@ IMAGE_NAMES = [
|
||||
# pytorch_build_data.py
|
||||
SLOW_GRADCHECK_IMAGE_NAME = "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
|
||||
|
||||
def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
|
||||
def get_workflow_jobs(only_slow_gradcheck=False):
|
||||
"""Generates a list of docker image build definitions"""
|
||||
ret = []
|
||||
for image_name in images:
|
||||
if image_name.startswith('docker-'):
|
||||
image_name = image_name.lstrip('docker-')
|
||||
for image_name in IMAGE_NAMES:
|
||||
if only_slow_gradcheck and image_name is not SLOW_GRADCHECK_IMAGE_NAME:
|
||||
continue
|
||||
|
||||
@ -26,7 +43,7 @@ def get_workflow_jobs(images=IMAGE_NAMES, only_slow_gradcheck=False):
|
||||
"name": quote(f"docker-{image_name}"),
|
||||
"image_name": quote(image_name),
|
||||
})
|
||||
if image_name == "pytorch-linux-xenial-py3.7-gcc5.4":
|
||||
if image_name == "pytorch-linux-xenial-py3.6-gcc5.4":
|
||||
# pushing documentation on tags requires CircleCI to also
|
||||
# build all the dependencies on tags, including this docker image
|
||||
parameters['filters'] = gen_filter_dict(branches_list=r"/.*/",
|
||||
|
||||
@ -75,12 +75,6 @@ WORKFLOW_DATA = [
|
||||
IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom"), extra_props={
|
||||
"op_list": "mobilenetv2.yaml",
|
||||
"lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
IOSJob(XCODE_VERSION, ArchVariant("x86_64", "coreml"), is_org_member_context=False, extra_props={
|
||||
"use_coreml": miniutils.quote(str(int(True))),
|
||||
"lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
IOSJob(XCODE_VERSION, ArchVariant("arm64", "coreml"), extra_props={
|
||||
"use_coreml": miniutils.quote(str(int(True))),
|
||||
"lite_interpreter": miniutils.quote(str(int(True)))}),
|
||||
]
|
||||
|
||||
|
||||
|
||||
@ -4,6 +4,12 @@ PyTorch Mobile PR builds (use linux host toolchain + mobile build options)
|
||||
|
||||
import cimodel.lib.miniutils as miniutils
|
||||
import cimodel.data.simple.util.branch_filters
|
||||
from cimodel.data.simple.util.docker_constants import (
|
||||
DOCKER_IMAGE_ASAN,
|
||||
DOCKER_REQUIREMENT_ASAN,
|
||||
DOCKER_IMAGE_NDK,
|
||||
DOCKER_REQUIREMENT_NDK
|
||||
)
|
||||
|
||||
|
||||
class MobileJob:
|
||||
@ -46,6 +52,33 @@ class MobileJob:
|
||||
|
||||
|
||||
WORKFLOW_DATA = [
|
||||
MobileJob(
|
||||
DOCKER_IMAGE_ASAN,
|
||||
[DOCKER_REQUIREMENT_ASAN],
|
||||
["build"]
|
||||
),
|
||||
|
||||
# Use LLVM-DEV toolchain in android-ndk-r19c docker image
|
||||
MobileJob(
|
||||
DOCKER_IMAGE_NDK,
|
||||
[DOCKER_REQUIREMENT_NDK],
|
||||
["custom", "build", "dynamic"]
|
||||
),
|
||||
|
||||
MobileJob(
|
||||
DOCKER_IMAGE_NDK,
|
||||
[DOCKER_REQUIREMENT_NDK],
|
||||
["custom", "build", "static"]
|
||||
),
|
||||
|
||||
# Use LLVM-DEV toolchain in android-ndk-r19c docker image
|
||||
# Most of this CI is already covered by "mobile-custom-build-dynamic" job
|
||||
MobileJob(
|
||||
DOCKER_IMAGE_NDK,
|
||||
[DOCKER_REQUIREMENT_NDK],
|
||||
["code", "analysis"],
|
||||
True
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
|
||||
77
.circleci/cimodel/data/simple/nightly_android.py
Normal file
@@ -0,0 +1,77 @@
from cimodel.data.simple.util.docker_constants import (
    DOCKER_IMAGE_NDK,
    DOCKER_REQUIREMENT_NDK
)


class AndroidNightlyJob:
    def __init__(self,
                 variant,
                 template_name,
                 extra_props=None,
                 with_docker=True,
                 requires=None,
                 no_build_suffix=False):

        self.variant = variant
        self.template_name = template_name
        self.extra_props = extra_props or {}
        self.with_docker = with_docker
        self.requires = requires
        self.no_build_suffix = no_build_suffix

    def gen_tree(self):

        base_name_parts = [
            "pytorch",
            "linux",
            "xenial",
            "py3",
            "clang5",
            "android",
            "ndk",
            "r19c",
        ] + self.variant

        build_suffix = [] if self.no_build_suffix else ["build"]
        full_job_name = "_".join(["nightly"] + base_name_parts + build_suffix)
        build_env_name = "-".join(base_name_parts)

        props_dict = {
            "name": full_job_name,
            "requires": self.requires,
            "filters": {"branches": {"only": "nightly"}},
        }

        props_dict.update(self.extra_props)

        if self.with_docker:
            props_dict["docker_image"] = DOCKER_IMAGE_NDK
            props_dict["build_environment"] = build_env_name

        return [{self.template_name: props_dict}]


BASE_REQUIRES = [DOCKER_REQUIREMENT_NDK]

WORKFLOW_DATA = [
    AndroidNightlyJob(["x86_32"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["x86_64"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["arm", "v7a"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["arm", "v8a"], "pytorch_linux_build", requires=BASE_REQUIRES),
    AndroidNightlyJob(["android_gradle"], "pytorch_android_gradle_build",
                      with_docker=False,
                      requires=[
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build",
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_64_build",
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v7a_build",
                          "nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_arm_v8a_build"]),
    AndroidNightlyJob(["x86_32_android_publish_snapshot"], "pytorch_android_publish_snapshot",
                      extra_props={"context": "org-member"},
                      with_docker=False,
                      requires=["nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_android_gradle_build"],
                      no_build_suffix=True),
]


def get_workflow_jobs():
    return [item.gen_tree() for item in WORKFLOW_DATA]
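The nightly Android jobs above are named purely by joining fixed name pieces with the variant. The standalone sketch below (not part of the diff) reproduces that join for the `x86_32` variant, which is why the long `requires` strings later in the file look the way they do; the values come directly from `gen_tree()` above.

```python
# Standalone sketch of the naming convention in AndroidNightlyJob.gen_tree() above.
base_name_parts = ["pytorch", "linux", "xenial", "py3", "clang5",
                   "android", "ndk", "r19c"] + ["x86_32"]

full_job_name = "_".join(["nightly"] + base_name_parts + ["build"])
build_env_name = "-".join(base_name_parts)

print(full_job_name)   # nightly_pytorch_linux_xenial_py3_clang5_android_ndk_r19c_x86_32_build
print(build_env_name)  # pytorch-linux-xenial-py3-clang5-android-ndk-r19c-x86_32
```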
@@ -5,11 +5,9 @@ import cimodel.lib.miniutils as miniutils

class IOSNightlyJob:
    def __init__(self,
                 variant,
                 is_full_jit=False,
                 is_upload=False):

        self.variant = variant
        self.is_full_jit = is_full_jit
        self.is_upload = is_upload

    def get_phase_name(self):
@@ -19,11 +17,8 @@ class IOSNightlyJob:

        extra_name_suffix = [self.get_phase_name()] if self.is_upload else []

        extra_name = ["full_jit"] if self.is_full_jit else []

        common_name_pieces = [
            "ios",
        ] + extra_name + [
        ] + ios_definitions.XCODE_VERSION.render_dots_or_parts(with_version_dots) + [
            "nightly",
            self.variant,
@@ -36,8 +31,7 @@ class IOSNightlyJob:
        return "_".join(["pytorch"] + self.get_common_name_pieces(False))

    def gen_tree(self):
        build_configs = BUILD_CONFIGS_FULL_JIT if self.is_full_jit else BUILD_CONFIGS
        extra_requires = [x.gen_job_name() for x in build_configs] if self.is_upload else []
        extra_requires = [x.gen_job_name() for x in BUILD_CONFIGS] if self.is_upload else []

        props_dict = {
            "build_environment": "-".join(["libtorch"] + self.get_common_name_pieces(True)),
@@ -53,9 +47,6 @@ class IOSNightlyJob:
            props_dict["use_metal"] = miniutils.quote(str(int(True)))
            props_dict["use_coreml"] = miniutils.quote(str(int(True)))

        if self.is_full_jit:
            props_dict["lite_interpreter"] = miniutils.quote(str(int(False)))

        template_name = "_".join([
            "binary",
            "ios",
@@ -70,14 +61,9 @@ BUILD_CONFIGS = [
    IOSNightlyJob("arm64"),
]

BUILD_CONFIGS_FULL_JIT = [
    IOSNightlyJob("x86_64", is_full_jit=True),
    IOSNightlyJob("arm64", is_full_jit=True),
]

WORKFLOW_DATA = BUILD_CONFIGS + BUILD_CONFIGS_FULL_JIT + [
    IOSNightlyJob("binary", is_full_jit=False, is_upload=True),
    IOSNightlyJob("binary", is_full_jit=True, is_upload=True),
WORKFLOW_DATA = BUILD_CONFIGS + [
    IOSNightlyJob("binary", is_upload=True),
]
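Several of the hunks above set boolean build flags via `miniutils.quote(str(int(...)))`. The snippet below is a minimal sketch of that idiom, assuming `miniutils.quote` simply wraps its argument in double quotes (the helper itself is not shown in this diff); it is illustrative only.

```python
# Sketch of the flag-quoting idiom used in the iOS/mobile job definitions above.
# Assumption: miniutils.quote wraps its argument in double quotes for the YAML output.
def quote(s: str) -> str:
    return '"' + s + '"'

print(quote(str(int(True))))   # "1"  -> e.g. use_coreml / use_metal
print(quote(str(int(False))))  # "0"  -> e.g. lite_interpreter for full-JIT nightlies
```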
@@ -1,5 +1,4 @@
NON_PR_BRANCH_LIST = [
    "main",
    "master",
    r"/ci-all\/.*/",
    r"/release\/.*/",
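For context, entries wrapped in slashes in `NON_PR_BRANCH_LIST` are CircleCI branch regexes rather than literal names. The standalone sketch below is my own illustration of how such a list is typically interpreted, not code from the repository.

```python
import re

# Hypothetical helper illustrating how the slash-wrapped entries above act as regexes.
NON_PR_BRANCH_LIST = ["main", "master", r"/ci-all\/.*/", r"/release\/.*/"]

def is_non_pr_branch(branch: str) -> bool:
    for entry in NON_PR_BRANCH_LIST:
        if entry.startswith("/") and entry.endswith("/"):
            if re.fullmatch(entry.strip("/"), branch):
                return True
        elif entry == branch:
            return True
    return False

assert is_non_pr_branch("release/1.10")
assert not is_non_pr_branch("my-feature-branch")
```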
@@ -11,7 +11,7 @@ def gen_docker_image_requires(image_name):

DOCKER_IMAGE_BASIC, DOCKER_REQUIREMENT_BASE = gen_docker_image(
    "pytorch-linux-xenial-py3.7-gcc5.4"
    "pytorch-linux-xenial-py3.6-gcc5.4"
)

DOCKER_IMAGE_CUDA_10_2, DOCKER_REQUIREMENT_CUDA_10_2 = gen_docker_image(
@@ -19,7 +19,7 @@ DOCKER_IMAGE_CUDA_10_2, DOCKER_REQUIREMENT_CUDA_10_2 = gen_docker_image(
)

DOCKER_IMAGE_GCC7, DOCKER_REQUIREMENT_GCC7 = gen_docker_image(
    "pytorch-linux-xenial-py3.7-gcc7"
    "pytorch-linux-xenial-py3.6-gcc7"
)
160  .circleci/cimodel/data/windows_build_definitions.py  Normal file
@@ -0,0 +1,160 @@
import cimodel.lib.miniutils as miniutils
from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN, NON_PR_BRANCH_LIST
from cimodel.data.simple.util.versions import CudaVersion


class WindowsJob:
    def __init__(
        self,
        test_index,
        vscode_spec,
        cuda_version,
        force_on_cpu=False,
        multi_gpu=False,
        master_only=False,
        nightly_only=False,
        master_and_nightly=False
    ):
        self.test_index = test_index
        self.vscode_spec = vscode_spec
        self.cuda_version = cuda_version
        self.force_on_cpu = force_on_cpu
        self.multi_gpu = multi_gpu
        self.master_only = master_only
        self.nightly_only = nightly_only
        self.master_and_nightly = master_and_nightly

    def gen_tree(self):

        base_phase = "build" if self.test_index is None else "test"
        numbered_phase = (
            base_phase if self.test_index is None else base_phase + str(self.test_index)
        )

        key_parts = ["pytorch", "windows", base_phase]
        if self.multi_gpu:
            key_parts.append('multigpu')
        key_name = "_".join(key_parts)

        cpu_forcing_name_parts = ["on", "cpu"] if self.force_on_cpu else []

        target_arch = self.cuda_version.render_dots() if self.cuda_version else "cpu"

        python_version = "3.8"

        base_name_parts = [
            "pytorch",
            "windows",
            self.vscode_spec.render(),
            "py" + python_version.replace(".", ""),
            target_arch,
        ]

        prerequisite_jobs = []
        if base_phase == "test":
            prerequisite_jobs.append("_".join(base_name_parts + ["build"]))

        if self.cuda_version:
            self.cudnn_version = 8 if self.cuda_version.major == 11 else 7

        arch_env_elements = (
            ["cuda" + str(self.cuda_version.major) + "." + str(self.cuda_version.minor)]
            if self.cuda_version
            else ["cpu"]
        )

        build_environment_string = "-".join(
            ["pytorch", "win"]
            + self.vscode_spec.get_elements()
            + arch_env_elements
            + ["py" + python_version.split(".")[0]]
        )

        is_running_on_cuda = bool(self.cuda_version) and not self.force_on_cpu

        if self.multi_gpu:
            props_dict = {"requires": prerequisite_jobs}
        else:
            props_dict = {
                "build_environment": build_environment_string,
                "python_version": miniutils.quote(python_version),
                "vs_version": miniutils.quote("16.8.6"),
                "vc_version": miniutils.quote(self.vscode_spec.dotted_version()),
                "vc_year": miniutils.quote(str(self.vscode_spec.year)),
                "vc_product": self.vscode_spec.get_product(),
                "use_cuda": miniutils.quote(str(int(is_running_on_cuda))),
                "requires": prerequisite_jobs,
            }

        if self.master_only:
            props_dict[
                "filters"
            ] = gen_filter_dict()
        elif self.nightly_only:
            props_dict[
                "filters"
            ] = gen_filter_dict(branches_list=["nightly"], tags_list=RC_PATTERN)
        elif self.master_and_nightly:
            props_dict[
                "filters"
            ] = gen_filter_dict(branches_list=NON_PR_BRANCH_LIST + ["nightly"], tags_list=RC_PATTERN)

        name_parts = base_name_parts + cpu_forcing_name_parts + [numbered_phase]

        if not self.multi_gpu:
            if base_phase == "test":
                test_name = "-".join(["pytorch", "windows", numbered_phase])
                props_dict["test_name"] = test_name

                if is_running_on_cuda:
                    props_dict["executor"] = "windows-with-nvidia-gpu"

            props_dict["cuda_version"] = (
                miniutils.quote(str(self.cuda_version))
                if self.cuda_version
                else "cpu"
            )

        props_dict["name"] = "_".join(name_parts)

        return [{key_name: props_dict}]


class VcSpec:
    def __init__(self, year, version_elements=None, hide_version=False):
        self.year = year
        self.version_elements = version_elements or []
        self.hide_version = hide_version

    def get_elements(self):
        if self.hide_version:
            return [self.prefixed_year()]
        return [self.prefixed_year()] + self.version_elements

    def get_product(self):
        return "BuildTools"

    def dotted_version(self):
        return ".".join(self.version_elements)

    def prefixed_year(self):
        return "vs" + str(self.year)

    def render(self):
        return "_".join(self.get_elements())


_VC2019 = VcSpec(2019)

WORKFLOW_DATA = [
    # VS2019 CUDA-10.2
    WindowsJob(None, _VC2019, CudaVersion(10, 2), master_only=True),
    # VS2019 CUDA-10.2 force on cpu
    WindowsJob(1, _VC2019, CudaVersion(10, 2), force_on_cpu=True, master_only=True),

    # TODO: This test is disabled due to https://github.com/pytorch/pytorch/issues/59724
    # WindowsJob('_azure_multi_gpu', _VC2019, CudaVersion(11, 1), multi_gpu=True, master_and_nightly=True),
]


def get_windows_workflows():
    return [item.gen_tree() for item in WORKFLOW_DATA]
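As a quick orientation aid, the snippet below is a standalone sketch (not repository code) of the names the file above produces for the VS2019 + CUDA 10.2 build job. It assumes `CudaVersion(10, 2).render_dots()` yields "10.2"; the `CudaVersion` class itself is not shown in this diff.

```python
# Standalone sketch of the Windows job naming produced by WindowsJob.gen_tree() above.
# Assumption: CudaVersion(10, 2).render_dots() == "10.2".
python_version = "3.8"
vs_render = "vs2019"                      # VcSpec(2019).render() with no version elements
base_name_parts = ["pytorch", "windows", vs_render,
                   "py" + python_version.replace(".", ""), "10.2"]

print("_".join(base_name_parts + ["build"]))
# pytorch_windows_vs2019_py38_10.2_build

print("-".join(["pytorch", "win", vs_render, "cuda10.2", "py3"]))
# pytorch-win-vs2019-cuda10.2-py3   (the build_environment string)
```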
7840  .circleci/config.yml  generated
File diff suppressed because it is too large
@@ -51,9 +51,9 @@ android {
dependencies {
    implementation 'com.android.support:appcompat-v7:28.0.0'
    implementation 'androidx.appcompat:appcompat:1.0.0'
    implementation 'com.facebook.fbjni:fbjni-java-only:0.2.2'
    implementation 'com.facebook.fbjni:fbjni-java-only:0.0.3'
    implementation 'com.google.code.findbugs:jsr305:3.0.1'
    implementation 'com.facebook.soloader:nativeloader:0.10.1'
    implementation 'com.facebook.soloader:nativeloader:0.8.0'

    implementation 'junit:junit:' + rootProject.junitVersion
    implementation 'androidx.test:core:' + rootProject.coreVersion
@ -40,12 +40,6 @@ function extract_all_from_image_name() {
|
||||
done
|
||||
}
|
||||
|
||||
# Use the same pre-built XLA test image from PyTorch/XLA
|
||||
if [[ "$image" == *xla* ]]; then
|
||||
echo "Using pre-built XLA test image..."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ "$image" == *-xenial* ]]; then
|
||||
UBUNTU_VERSION=16.04
|
||||
elif [[ "$image" == *-artful* ]]; then
|
||||
@ -76,10 +70,6 @@ elif [[ "$image" == *rocm* ]]; then
|
||||
DOCKERFILE="${OS}-rocm/Dockerfile"
|
||||
fi
|
||||
|
||||
if [[ "$image" == *xenial* ]] || [[ "$image" == *bionic* ]]; then
|
||||
CMAKE_VERSION=3.13.5
|
||||
fi
|
||||
|
||||
TRAVIS_DL_URL_PREFIX="https://s3.amazonaws.com/travis-python-archives/binaries/ubuntu/14.04/x86_64"
|
||||
|
||||
# It's annoying to rename jobs every time you want to rewrite a
|
||||
@ -88,24 +78,28 @@ TRAVIS_DL_URL_PREFIX="https://s3.amazonaws.com/travis-python-archives/binaries/u
|
||||
case "$image" in
|
||||
pytorch-linux-xenial-py3.8)
|
||||
ANACONDA_PYTHON_VERSION=3.8
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py3.7-gcc5.4)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
pytorch-linux-xenial-py3.6-gcc5.4)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=5
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3.7-gcc7.2)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
pytorch-linux-xenial-py3.6-gcc7.2)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
# Do not install PROTOBUF, DB, and VISION as a test
|
||||
;;
|
||||
pytorch-linux-xenial-py3.7-gcc7)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
pytorch-linux-xenial-py3.6-gcc7)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
@ -114,7 +108,19 @@ case "$image" in
|
||||
pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7)
|
||||
CUDA_VERSION=10.2
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7)
|
||||
CUDA_VERSION=11.1
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
@ -124,29 +130,8 @@ case "$image" in
|
||||
pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7)
|
||||
CUDA_VERSION=11.3.0 # Deviating from major.minor to conform to nvidia's Docker image names
|
||||
CUDNN_VERSION=8
|
||||
TENSORRT_VERSION=8.0.1.6
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-bionic-cuda11.3-cudnn8-py3-clang9)
|
||||
CUDA_VERSION=11.3.0 # Deviating from major.minor to conform to nvidia's Docker image names
|
||||
CUDNN_VERSION=8
|
||||
TENSORRT_VERSION=8.0.1.6
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CLANG_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-bionic-cuda11.6-cudnn8-py3-gcc7)
|
||||
CUDA_VERSION=11.6.0
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CMAKE_VERSION=3.10.3
|
||||
GCC_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
@ -154,29 +139,33 @@ case "$image" in
|
||||
KATEX=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang5-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=5.0
|
||||
CMAKE_VERSION=3.10.3
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang7-asan)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=7
|
||||
CMAKE_VERSION=3.10.3
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang7-onnx)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=7
|
||||
CMAKE_VERSION=3.10.3
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-xenial-py3-clang5-android-ndk-r19c)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=5.0
|
||||
CMAKE_VERSION=3.10.3
|
||||
LLVMDEV=yes
|
||||
PROTOBUF=yes
|
||||
ANDROID=yes
|
||||
@ -184,15 +173,16 @@ case "$image" in
|
||||
GRADLE_VERSION=6.8.3
|
||||
NINJA_VERSION=1.9.0
|
||||
;;
|
||||
pytorch-linux-xenial-py3.7-clang7)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
pytorch-linux-xenial-py3.6-clang7)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CMAKE_VERSION=3.10.3
|
||||
CLANG_VERSION=7
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-bionic-py3.7-clang9)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
pytorch-linux-bionic-py3.6-clang9)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
@ -207,10 +197,10 @@ case "$image" in
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-bionic-cuda10.2-cudnn7-py3.7-clang9)
|
||||
pytorch-linux-bionic-cuda10.2-cudnn7-py3.6-clang9)
|
||||
CUDA_VERSION=10.2
|
||||
CUDNN_VERSION=7
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
CLANG_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
@ -225,30 +215,39 @@ case "$image" in
|
||||
DB=yes
|
||||
VISION=yes
|
||||
;;
|
||||
pytorch-linux-bionic-rocm5.0-py3.7)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
pytorch-linux-bionic-cuda11.0-cudnn8-py3.6-gcc9)
|
||||
CUDA_VERSION=11.0
|
||||
CUDNN_VERSION=8
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
ROCM_VERSION=5.0
|
||||
ROCM_VERSION=3.9
|
||||
;;
|
||||
pytorch-linux-bionic-rocm5.1-py3.7)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
pytorch-linux-bionic-rocm4.1-py3.6)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
ROCM_VERSION=5.1.1
|
||||
ROCM_VERSION=4.1
|
||||
;;
|
||||
pytorch-linux-focal-py3.7-gcc7)
|
||||
ANACONDA_PYTHON_VERSION=3.7
|
||||
CMAKE_VERSION=3.12.4 # To make sure XNNPACK is enabled for the BACKWARDS_COMPAT_TEST used with this image
|
||||
GCC_VERSION=7
|
||||
pytorch-linux-bionic-rocm4.2-py3.6)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
KATEX=yes
|
||||
ROCM_VERSION=4.2
|
||||
;;
|
||||
pytorch-linux-bionic-rocm4.3.1-py3.6)
|
||||
ANACONDA_PYTHON_VERSION=3.6
|
||||
GCC_VERSION=9
|
||||
PROTOBUF=yes
|
||||
DB=yes
|
||||
VISION=yes
|
||||
ROCM_VERSION=4.3.1
|
||||
;;
|
||||
*)
|
||||
# Catch-all for builds that are not hardcoded.
|
||||
@ -256,6 +255,9 @@ case "$image" in
|
||||
DB=yes
|
||||
VISION=yes
|
||||
echo "image '$image' did not match an existing build configuration"
|
||||
if [[ "$image" == *xenial* ]]; then
|
||||
CMAKE_VERSION=3.10.3
|
||||
fi
|
||||
if [[ "$image" == *py* ]]; then
|
||||
extract_version_from_image_name py ANACONDA_PYTHON_VERSION
|
||||
fi
|
||||
@ -292,14 +294,6 @@ fi
|
||||
|
||||
tmp_tag=$(basename "$(mktemp -u)" | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
#when using cudnn version 8 install it separately from cuda
|
||||
if [[ "$image" == *cuda* && ${OS} == "ubuntu" ]]; then
|
||||
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
|
||||
if [[ ${CUDNN_VERSION} == 8 ]]; then
|
||||
IMAGE_NAME="nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build image
|
||||
# TODO: build-arg THRIFT is not turned on for any image, remove it once we confirm
|
||||
# it's no longer needed.
|
||||
@ -326,7 +320,6 @@ docker build \
|
||||
--build-arg "GCC_VERSION=${GCC_VERSION}" \
|
||||
--build-arg "CUDA_VERSION=${CUDA_VERSION}" \
|
||||
--build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \
|
||||
--build-arg "TENSORRT_VERSION=${TENSORRT_VERSION}" \
|
||||
--build-arg "ANDROID=${ANDROID}" \
|
||||
--build-arg "ANDROID_NDK=${ANDROID_NDK_VERSION}" \
|
||||
--build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \
|
||||
@ -336,8 +329,6 @@ docker build \
|
||||
--build-arg "NINJA_VERSION=${NINJA_VERSION:-}" \
|
||||
--build-arg "KATEX=${KATEX:-}" \
|
||||
--build-arg "ROCM_VERSION=${ROCM_VERSION:-}" \
|
||||
--build-arg "PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH:-gfx900;gfx906}" \
|
||||
--build-arg "IMAGE_NAME=${IMAGE_NAME}" \
|
||||
-f $(dirname ${DOCKERFILE})/Dockerfile \
|
||||
-t "$tmp_tag" \
|
||||
"$@" \
|
||||
@ -356,7 +347,6 @@ function drun() {
|
||||
}
|
||||
|
||||
if [[ "$OS" == "ubuntu" ]]; then
|
||||
|
||||
if !(drun lsb_release -a 2>&1 | grep -qF Ubuntu); then
|
||||
echo "OS=ubuntu, but:"
|
||||
drun lsb_release -a
|
||||
|
||||
@ -26,14 +26,11 @@ login() {
|
||||
docker login -u AWS --password-stdin "$1"
|
||||
}
|
||||
|
||||
# Retry on timeouts (can happen on job stampede).
|
||||
retry login "${registry}"
|
||||
|
||||
# Only run these steps if not on github actions
|
||||
if [[ -z "${GITHUB_ACTIONS}" ]]; then
|
||||
# Retry on timeouts (can happen on job stampede).
|
||||
retry login "${registry}"
|
||||
# Logout on exit
|
||||
trap "docker logout ${registry}" EXIT
|
||||
fi
|
||||
# Logout on exit
|
||||
trap "docker logout ${registry}" EXIT
|
||||
|
||||
# export EC2=1
|
||||
# export JENKINS=1
|
||||
@ -48,8 +45,8 @@ fi
|
||||
|
||||
docker push "${image}:${tag}"
|
||||
|
||||
docker save -o "${IMAGE_NAME}:${tag}.tar" "${image}:${tag}"
|
||||
|
||||
if [ -z "${DOCKER_SKIP_S3_UPLOAD:-}" ]; then
|
||||
trap "rm -rf ${IMAGE_NAME}:${tag}.tar" EXIT
|
||||
docker save -o "${IMAGE_NAME}:${tag}.tar" "${image}:${tag}"
|
||||
aws s3 cp "${IMAGE_NAME}:${tag}.tar" "s3://ossci-linux-build/pytorch/base/${IMAGE_NAME}:${tag}.tar" --acl public-read
|
||||
fi
|
||||
|
||||
@ -4,10 +4,6 @@ FROM centos:${CENTOS_VERSION}
|
||||
|
||||
ARG CENTOS_VERSION
|
||||
|
||||
# Set AMD gpu targets to build for
|
||||
ARG PYTORCH_ROCM_ARCH
|
||||
ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
|
||||
|
||||
# Install required packages to build Caffe2
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
@ -15,12 +11,6 @@ ARG EC2
|
||||
ADD ./common/install_base.sh install_base.sh
|
||||
RUN bash ./install_base.sh && rm install_base.sh
|
||||
|
||||
# Update CentOS git version
|
||||
RUN yum -y remove git
|
||||
RUN yum -y remove git-*
|
||||
RUN yum -y install https://packages.endpoint.com/rhel/7/os/x86_64/endpoint-repo-1.9-1.x86_64.rpm
|
||||
RUN yum install -y git
|
||||
|
||||
# Install devtoolset
|
||||
ARG DEVTOOLSET_VERSION
|
||||
ADD ./common/install_devtoolset.sh install_devtoolset.sh
|
||||
@ -37,13 +27,11 @@ RUN rm install_glibc.sh
|
||||
ADD ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
# Install conda and other packages (e.g., numpy, coverage, pytest)
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ADD requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
ADD ./common/install_conda.sh install_conda.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh
|
||||
RUN rm /opt/conda/requirements-ci.txt
|
||||
|
||||
# (optional) Install protobuf for ONNX
|
||||
ARG PROTOBUF
|
||||
|
||||
@ -11,20 +11,10 @@ install_ubuntu() {
|
||||
# "$UBUNTU_VERSION" == "18.04"
|
||||
if [[ "$UBUNTU_VERSION" == "18.04"* ]]; then
|
||||
cmake3="cmake=3.10*"
|
||||
maybe_libiomp_dev="libiomp-dev"
|
||||
elif [[ "$UBUNTU_VERSION" == "20.04"* ]]; then
|
||||
cmake3="cmake=3.16*"
|
||||
maybe_libiomp_dev=""
|
||||
else
|
||||
cmake3="cmake=3.5*"
|
||||
maybe_libiomp_dev="libiomp-dev"
|
||||
fi
|
||||
|
||||
# TODO: Remove this once nvidia package repos are back online
|
||||
# Comment out nvidia repositories to prevent them from getting apt-get updated, see https://github.com/pytorch/pytorch/issues/74968
|
||||
# shellcheck disable=SC2046
|
||||
sed -i 's/.*nvidia.*/# &/' $(find /etc/apt/ -type f -name "*.list")
|
||||
|
||||
# Install common dependencies
|
||||
apt-get update
|
||||
# TODO: Some of these may not be necessary
|
||||
@ -43,21 +33,17 @@ install_ubuntu() {
|
||||
git \
|
||||
libatlas-base-dev \
|
||||
libc6-dbg \
|
||||
${maybe_libiomp_dev} \
|
||||
libiomp-dev \
|
||||
libyaml-dev \
|
||||
libz-dev \
|
||||
libjpeg-dev \
|
||||
libasound2-dev \
|
||||
libsndfile-dev \
|
||||
software-properties-common \
|
||||
wget \
|
||||
sudo \
|
||||
wget \
|
||||
vim
|
||||
|
||||
# Should resolve issues related to various apt package repository cert issues
|
||||
# see: https://github.com/pytorch/pytorch/issues/65931
|
||||
apt-get install -y libgnutls30
|
||||
|
||||
# Cleanup package manager
|
||||
apt-get autoclean && apt-get clean
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
@ -123,11 +109,14 @@ esac
|
||||
# Install Valgrind separately since the apt-get version is too old.
|
||||
mkdir valgrind_build && cd valgrind_build
|
||||
VALGRIND_VERSION=3.16.1
|
||||
wget https://ossci-linux.s3.amazonaws.com/valgrind-${VALGRIND_VERSION}.tar.bz2
|
||||
if ! wget http://valgrind.org/downloads/valgrind-${VALGRIND_VERSION}.tar.bz2
|
||||
then
|
||||
wget https://sourceware.org/ftp/valgrind/valgrind-${VALGRIND_VERSION}.tar.bz2
|
||||
fi
|
||||
tar -xjf valgrind-${VALGRIND_VERSION}.tar.bz2
|
||||
cd valgrind-${VALGRIND_VERSION}
|
||||
./configure --prefix=/usr/local
|
||||
make -j6
|
||||
make -j 4
|
||||
sudo make install
|
||||
cd ../../
|
||||
rm -rf valgrind_build
|
||||
|
||||
@ -21,7 +21,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
;;
|
||||
esac
|
||||
|
||||
mkdir -p /opt/conda
|
||||
mkdir /opt/conda
|
||||
chown jenkins:jenkins /opt/conda
|
||||
|
||||
# Work around bug where devtoolset replaces sudo and breaks it.
|
||||
@ -68,16 +68,14 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
as_jenkins conda install -q -y python="$ANACONDA_PYTHON_VERSION" $*
|
||||
}
|
||||
|
||||
pip_install() {
|
||||
as_jenkins pip install --progress-bar off $*
|
||||
}
|
||||
|
||||
# Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
|
||||
# DO NOT install cmake here as it would install a version newer than 3.10, but
|
||||
# we want to pin to version 3.10.
|
||||
SCIPY_VERSION=1.1.0
|
||||
if [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
|
||||
# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
|
||||
conda_install numpy=1.19.2 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0
|
||||
conda_install numpy=1.19.2 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0 -c conda-forge
|
||||
SCIPY_VERSION=1.6.0
|
||||
elif [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then
|
||||
# Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
|
||||
conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0
|
||||
@ -88,24 +86,50 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
|
||||
conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six dataclasses typing_extensions
|
||||
fi
|
||||
|
||||
# Magma package names are concatenation of CUDA major and minor ignoring revision
|
||||
# I.e. magma-cuda102 package corresponds to CUDA_VERSION=10.2 and CUDA_VERSION=10.2.89
|
||||
if [ -n "$CUDA_VERSION" ]; then
|
||||
conda_install magma-cuda$(TMP=${CUDA_VERSION/./};echo ${TMP%.*[0-9]}) -c pytorch
|
||||
if [[ "$CUDA_VERSION" == 10.2* ]]; then
|
||||
conda_install magma-cuda102 -c pytorch
|
||||
elif [[ "$CUDA_VERSION" == 11.0* ]]; then
|
||||
conda_install magma-cuda110 -c pytorch
|
||||
elif [[ "$CUDA_VERSION" == 11.1* ]]; then
|
||||
conda_install magma-cuda111 -c pytorch
|
||||
elif [[ "$CUDA_VERSION" == 11.3* ]]; then
|
||||
conda_install magma-cuda113 -c pytorch
|
||||
fi
|
||||
|
||||
# TODO: This isn't working atm
|
||||
conda_install nnpack -c killeent
|
||||
|
||||
# Install some other packages, including those needed for Python test reporting
|
||||
pip_install -r /opt/conda/requirements-ci.txt
|
||||
# TODO: Why is scipy pinned
|
||||
# Pin MyPy version because new errors are likely to appear with each release
|
||||
# Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
|
||||
# Pin coverage so we can use COVERAGE_RCFILE
|
||||
as_jenkins pip install --progress-bar off pytest \
|
||||
scipy==$SCIPY_VERSION \
|
||||
scikit-image \
|
||||
psutil \
|
||||
unittest-xml-reporting \
|
||||
boto3==1.16.34 \
|
||||
coverage==5.5 \
|
||||
hypothesis==4.53.2 \
|
||||
expecttest==0.1.3 \
|
||||
mypy==0.812 \
|
||||
tb-nightly
|
||||
|
||||
# Install numba only on python-3.8 or below
|
||||
# For numba issue see https://github.com/pytorch/pytorch/issues/51511
|
||||
if [[ $(python -c "import sys; print(int(sys.version_info < (3, 9)))") == "1" ]]; then
|
||||
as_jenkins pip install --progress-bar off numba librosa>=0.6.2
|
||||
else
|
||||
as_jenkins pip install --progress-bar off numba==0.49.0 librosa>=0.6.2
|
||||
fi
|
||||
|
||||
# Update scikit-learn to a python-3.8 compatible version
|
||||
if [[ $(python -c "import sys; print(int(sys.version_info >= (3, 8)))") == "1" ]]; then
|
||||
pip_install -U scikit-learn
|
||||
as_jenkins pip install --progress-bar off -U scikit-learn
|
||||
else
|
||||
# Pinned scikit-learn due to https://github.com/scikit-learn/scikit-learn/issues/14485 (affects gcc 5.5 only)
|
||||
pip_install scikit-learn==0.20.3
|
||||
as_jenkins pip install --progress-bar off scikit-learn==0.20.3
|
||||
fi
|
||||
|
||||
popd
|
||||
|
||||
@ -1,18 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [[ ${CUDNN_VERSION} == 8 ]]; then
|
||||
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
|
||||
mkdir tmp_cudnn && cd tmp_cudnn
|
||||
CUDNN_NAME="cudnn-linux-x86_64-8.3.2.44_cuda11.5-archive"
|
||||
curl -OLs https://developer.download.nvidia.com/compute/redist/cudnn/v8.3.2/local_installers/11.5/${CUDNN_NAME}.tar.xz
|
||||
tar xf ${CUDNN_NAME}.tar.xz
|
||||
cp -a ${CUDNN_NAME}/include/* /usr/include/
|
||||
cp -a ${CUDNN_NAME}/include/* /usr/local/cuda/include/
|
||||
cp -a ${CUDNN_NAME}/include/* /usr/include/x86_64-linux-gnu/
|
||||
|
||||
cp -a ${CUDNN_NAME}/lib/* /usr/local/cuda/lib64/
|
||||
cp -a ${CUDNN_NAME}/lib/* /usr/lib/x86_64-linux-gnu/
|
||||
cd ..
|
||||
rm -rf tmp_cudnn
|
||||
ldconfig
|
||||
fi
|
||||
@ -7,18 +7,15 @@ if [ -n "$GCC_VERSION" ]; then
|
||||
# Need the official toolchain repo to get alternate packages
|
||||
add-apt-repository ppa:ubuntu-toolchain-r/test
|
||||
apt-get update
|
||||
if [[ "$UBUNTU_VERSION" == "16.04" && "${GCC_VERSION:0:1}" == "5" ]]; then
|
||||
if [ "$UBUNTU_VERSION" = "16.04" -a "$GCC_VERSION" = "5" ]; then
|
||||
apt-get install -y g++-5=5.4.0-6ubuntu1~16.04.12
|
||||
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 50
|
||||
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-5 50
|
||||
update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-5 50
|
||||
else
|
||||
apt-get install -y g++-$GCC_VERSION
|
||||
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50
|
||||
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50
|
||||
update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50
|
||||
fi
|
||||
|
||||
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-"$GCC_VERSION" 50
|
||||
update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-"$GCC_VERSION" 50
|
||||
update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-"$GCC_VERSION" 50
|
||||
|
||||
# Cleanup package manager
|
||||
apt-get autoclean && apt-get clean
|
||||
|
||||
@ -3,9 +3,6 @@
|
||||
set -ex
|
||||
|
||||
if [ -n "$KATEX" ]; then
|
||||
apt-get update
|
||||
# Ignore error if gpg-agent doesn't exist (for Ubuntu 16.04)
|
||||
apt-get install -y gpg-agent || :
|
||||
|
||||
curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
|
||||
sudo apt-get install -y nodejs
|
||||
|
||||
@ -4,11 +4,11 @@ set -ex
|
||||
|
||||
OPENSSL=openssl-1.1.1k
|
||||
|
||||
wget -q -O "${OPENSSL}.tar.gz" "https://ossci-linux.s3.amazonaws.com/${OPENSSL}.tar.gz"
|
||||
wget -q -O "${OPENSSL}.tar.gz" "https://www.openssl.org/source/${OPENSSL}.tar.gz"
|
||||
tar xf "${OPENSSL}.tar.gz"
|
||||
cd "${OPENSSL}"
|
||||
./config --prefix=/opt/openssl -d '-Wl,--enable-new-dtags,-rpath,$(LIBRPATH)'
|
||||
# NOTE: openssl install errors out when built with the -j option
|
||||
make -j6; make install_sw
|
||||
# NOTE: openssl errors out when built with the -j option
|
||||
make install_sw
|
||||
cd ..
|
||||
rm -rf "${OPENSSL}"
|
||||
|
||||
@ -14,9 +14,9 @@ install_protobuf_317() {
|
||||
|
||||
curl -LO "https://github.com/protocolbuffers/protobuf/releases/download/v3.17.3/protobuf-all-3.17.3.tar.gz"
|
||||
tar -xvz -C "$pb_dir" --strip-components 1 -f protobuf-all-3.17.3.tar.gz
|
||||
# -j6 to balance memory usage and speed.
|
||||
# -j2 to balance memory usage and speed.
|
||||
# naked `-j` seems to use too much memory.
|
||||
pushd "$pb_dir" && ./configure && make -j6 && make -j6 check && sudo make -j6 install && sudo ldconfig
|
||||
pushd "$pb_dir" && ./configure && make -j2 && make -j2 check && sudo make -j2 install && sudo ldconfig
|
||||
popd
|
||||
rm -rf $pb_dir
|
||||
}
|
||||
|
||||
@ -4,27 +4,22 @@ set -ex
|
||||
|
||||
install_magma() {
|
||||
# "install" hipMAGMA into /opt/rocm/magma by copying after build
|
||||
git clone https://bitbucket.org/icl/magma.git
|
||||
git clone https://bitbucket.org/icl/magma.git -b magma_ctrl_launch_bounds
|
||||
pushd magma
|
||||
# Fixes memory leaks of magma found while executing linalg UTs
|
||||
git checkout 5959b8783e45f1809812ed96ae762f38ee701972
|
||||
# The branch "magma_ctrl_launch_bounds" is having a fix over the below commit, so keeping the below comment for reference.
|
||||
#git checkout 878b1ce02e9cfe4a829be22c8f911e9c0b6bd88f
|
||||
# Work around non-asii characters in certain magma sources; remove this after upstream magma fixes this.
|
||||
perl -i.bak -pe 's/[^[:ascii:]]//g' sparse/control/magma_zfree.cpp
|
||||
perl -i.bak -pe 's/[^[:ascii:]]//g' sparse/control/magma_zsolverinfo.cpp
|
||||
cp make.inc-examples/make.inc.hip-gcc-mkl make.inc
|
||||
echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc
|
||||
echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc
|
||||
echo 'DEVCCFLAGS += --gpu-max-threads-per-block=256' >> make.inc
|
||||
export PATH="${PATH}:/opt/rocm/bin"
|
||||
if [[ -n "$PYTORCH_ROCM_ARCH" ]]; then
|
||||
amdgpu_targets=`echo $PYTORCH_ROCM_ARCH | sed 's/;/ /g'`
|
||||
else
|
||||
amdgpu_targets=`rocm_agent_enumerator | grep -v gfx000 | sort -u | xargs`
|
||||
fi
|
||||
for arch in $amdgpu_targets; do
|
||||
echo "DEVCCFLAGS += --amdgpu-target=$arch" >> make.inc
|
||||
done
|
||||
echo 'DEVCCFLAGS += --amdgpu-target=gfx803 --amdgpu-target=gfx900 --amdgpu-target=gfx906 --amdgpu-target=gfx908 --gpu-max-threads-per-block=256' >> make.inc
|
||||
# hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition
|
||||
sed -i 's/^FOPENMP/#FOPENMP/g' make.inc
|
||||
export PATH="${PATH}:/opt/rocm/bin"
|
||||
make -f make.gen.hipMAGMA -j $(nproc)
|
||||
LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda
|
||||
make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda
|
||||
make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda
|
||||
popd
|
||||
mv magma /opt/rocm
|
||||
@ -34,19 +29,12 @@ ver() {
|
||||
printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
|
||||
}
|
||||
|
||||
# Map ROCm version to AMDGPU version
|
||||
declare -A AMDGPU_VERSIONS=( ["4.5.2"]="21.40.2" ["5.0"]="21.50" ["5.1.1"]="22.10.1" )
|
||||
|
||||
install_ubuntu() {
|
||||
apt-get update
|
||||
if [[ $UBUNTU_VERSION == 18.04 ]]; then
|
||||
# gpg-agent is not available by default on 18.04
|
||||
apt-get install -y --no-install-recommends gpg-agent
|
||||
fi
|
||||
if [[ $UBUNTU_VERSION == 20.04 ]]; then
|
||||
# gpg-agent is not available by default on 20.04
|
||||
apt-get install -y --no-install-recommends gpg-agent
|
||||
fi
|
||||
apt-get install -y kmod
|
||||
apt-get install -y wget
|
||||
|
||||
@ -54,13 +42,6 @@ install_ubuntu() {
|
||||
apt-get install -y libc++1
|
||||
apt-get install -y libc++abi1
|
||||
|
||||
if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then
|
||||
# Add amdgpu repository
|
||||
UBUNTU_VERSION_NAME=`cat /etc/os-release | grep UBUNTU_CODENAME | awk -F= '{print $2}'`
|
||||
local amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/ubuntu"
|
||||
echo "deb [arch=amd64] ${amdgpu_baseurl} ${UBUNTU_VERSION_NAME} main" > /etc/apt/sources.list.d/amdgpu.list
|
||||
fi
|
||||
|
||||
ROCM_REPO="ubuntu"
|
||||
if [[ $(ver $ROCM_VERSION) -lt $(ver 4.2) ]]; then
|
||||
ROCM_REPO="xenial"
|
||||
@ -68,8 +49,7 @@ install_ubuntu() {
|
||||
|
||||
# Add rocm repository
|
||||
wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -
|
||||
local rocm_baseurl="http://repo.radeon.com/rocm/apt/${ROCM_VERSION}"
|
||||
echo "deb [arch=amd64] ${rocm_baseurl} ${ROCM_REPO} main" > /etc/apt/sources.list.d/rocm.list
|
||||
echo "deb [arch=amd64] http://repo.radeon.com/rocm/apt/${ROCM_VERSION} ${ROCM_REPO} main" > /etc/apt/sources.list.d/rocm.list
|
||||
apt-get update --allow-insecure-repositories
|
||||
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
|
||||
@ -106,24 +86,11 @@ install_centos() {
|
||||
yum install -y epel-release
|
||||
yum install -y dkms kernel-headers-`uname -r` kernel-devel-`uname -r`
|
||||
|
||||
if [[ $(ver $ROCM_VERSION) -ge $(ver 4.5) ]]; then
|
||||
# Add amdgpu repository
|
||||
local amdgpu_baseurl="https://repo.radeon.com/amdgpu/${AMDGPU_VERSIONS[$ROCM_VERSION]}/rhel/7.9/main/x86_64"
|
||||
echo "[AMDGPU]" > /etc/yum.repos.d/amdgpu.repo
|
||||
echo "name=AMDGPU" >> /etc/yum.repos.d/amdgpu.repo
|
||||
echo "baseurl=${amdgpu_baseurl}" >> /etc/yum.repos.d/amdgpu.repo
|
||||
echo "enabled=1" >> /etc/yum.repos.d/amdgpu.repo
|
||||
echo "gpgcheck=1" >> /etc/yum.repos.d/amdgpu.repo
|
||||
echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/amdgpu.repo
|
||||
fi
|
||||
|
||||
local rocm_baseurl="http://repo.radeon.com/rocm/yum/${ROCM_VERSION}"
|
||||
echo "[ROCm]" > /etc/yum.repos.d/rocm.repo
|
||||
echo "name=ROCm" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "baseurl=${rocm_baseurl}" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "baseurl=http://repo.radeon.com/rocm/yum/${ROCM_VERSION}" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "enabled=1" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "gpgcheck=1" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "gpgkey=http://repo.radeon.com/rocm/rocm.gpg.key" >> /etc/yum.repos.d/rocm.repo
|
||||
echo "gpgcheck=0" >> /etc/yum.repos.d/rocm.repo
|
||||
|
||||
yum update -y
|
||||
|
||||
|
||||
@ -3,11 +3,8 @@
|
||||
set -ex
|
||||
|
||||
# Mirror jenkins user in container
|
||||
# jenkins user as ec2-user should have the same user-id
|
||||
echo "jenkins:x:1000:1000::/var/lib/jenkins:" >> /etc/passwd
|
||||
echo "jenkins:x:1000:" >> /etc/group
|
||||
# Needed on focal or newer
|
||||
echo "jenkins:*:19110:0:99999:7:::" >>/etc/shadow
|
||||
echo "jenkins:x:1014:1014::/var/lib/jenkins:" >> /etc/passwd
|
||||
echo "jenkins:x:1014:" >> /etc/group
|
||||
|
||||
# Create $HOME
|
||||
mkdir -p /var/lib/jenkins
|
||||
@ -21,6 +18,3 @@ chown jenkins:jenkins /usr/local
|
||||
# Allow sudo
|
||||
# TODO: Maybe we shouldn't
|
||||
echo 'jenkins ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/jenkins
|
||||
|
||||
# Test that sudo works
|
||||
sudo -u jenkins sudo -v
|
||||
|
||||
@ -1,212 +0,0 @@
|
||||
# Python dependencies required for unit tests
|
||||
|
||||
#awscli==1.6 #this breaks some platforms
|
||||
#Description: AWS command line interface
|
||||
#Pinned versions: 1.6
|
||||
#test that import:
|
||||
|
||||
boto3==1.19.12
|
||||
#Description: AWS SDK for python
|
||||
#Pinned versions: 1.19.12, 1.16.34
|
||||
#test that import:
|
||||
|
||||
click
|
||||
#Description: Command Line Interface Creation Kit
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
coremltools==5.0b5
|
||||
#Description: Apple framework for ML integration
|
||||
#Pinned versions: 5.0b5
|
||||
#test that import:
|
||||
|
||||
#dataclasses #this breaks some platforms
|
||||
#Description: Provides decorators for auto adding special methods to user classes
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
expecttest==0.1.3
|
||||
#Description: method for writing tests where test framework auto populates
|
||||
# the expected output based on previous runs
|
||||
#Pinned versions: 0.1.3
|
||||
#test that import:
|
||||
|
||||
flatbuffers==2.0
|
||||
#Description: cross platform serialization library
|
||||
#Pinned versions: 2.0
|
||||
#test that import:
|
||||
|
||||
#future #this breaks linux-bionic-rocm4.5-py3.7
|
||||
#Description: compatibility layer between python 2 and python 3
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
hypothesis==4.53.2
|
||||
# Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
|
||||
#Description: advanced library for generating parametrized tests
|
||||
#Pinned versions: 3.44.6, 4.53.2
|
||||
#test that import: test_xnnpack_integration.py, test_pruning_op.py, test_nn.py
|
||||
|
||||
junitparser==2.1.1
|
||||
#Description: unitparser handles JUnit/xUnit Result XML files
|
||||
#Pinned versions: 2.1.1
|
||||
#test that import:
|
||||
|
||||
librosa>=0.6.2
|
||||
#Description: A python package for music and audio analysis
|
||||
#Pinned versions: >=0.6.2
|
||||
#test that import: test_spectral_ops.py
|
||||
|
||||
#mkl #this breaks linux-bionic-rocm4.5-py3.7
|
||||
#Description: Intel oneAPI Math Kernel Library
|
||||
#Pinned versions:
|
||||
#test that import: test_profiler.py, test_public_bindings.py, test_testing.py,
|
||||
#test_nn.py, test_mkldnn.py, test_jit.py, test_fx_experimental.py,
|
||||
#test_autograd.py
|
||||
|
||||
#mkl-devel
|
||||
# see mkl
|
||||
|
||||
#mock # breaks ci/circleci: docker-pytorch-linux-xenial-py3-clang5-android-ndk-r19c
|
||||
#Description: A testing library that allows you to replace parts of your
|
||||
#system under test with mock objects
|
||||
#Pinned versions:
|
||||
#test that import: test_module_init.py, test_modules.py, test_nn.py,
|
||||
#test_testing.py
|
||||
|
||||
#MonkeyType # breaks pytorch-xla-linux-bionic-py3.7-clang8
|
||||
#Description: collects runtime types of function arguments and return
|
||||
#values, and can automatically generate stub files
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
mypy==0.812
|
||||
# Pin MyPy version because new errors are likely to appear with each release
|
||||
#Description: linter
|
||||
#Pinned versions: 0.812
|
||||
#test that import: test_typing.py, test_type_hints.py
|
||||
|
||||
#networkx
|
||||
#Description: creation, manipulation, and study of
|
||||
#the structure, dynamics, and functions of complex networks
|
||||
#Pinned versions: 2.0
|
||||
#test that import:
|
||||
|
||||
#ninja
|
||||
#Description: build system. Note that it install from
|
||||
#here breaks things so it is commented out
|
||||
#Pinned versions: 1.10.0.post1
|
||||
#test that import: run_test.py, test_cpp_extensions_aot.py,test_determination.py
|
||||
|
||||
numba==0.49.0 ; python_version < "3.9"
|
||||
numba==0.54.1 ; python_version == "3.9"
|
||||
#Description: Just-In-Time Compiler for Numerical Functions
|
||||
#Pinned versions: 0.54.1, 0.49.0, <=0.49.1
|
||||
#test that import: test_numba_integration.py
|
||||
#For numba issue see https://github.com/pytorch/pytorch/issues/51511
|
||||
|
||||
#numpy
|
||||
#Description: Provides N-dimensional arrays and linear algebra
|
||||
#Pinned versions: 1.20
|
||||
#test that import: test_view_ops.py, test_unary_ufuncs.py, test_type_promotion.py,
|
||||
#test_type_info.py, test_torch.py, test_tensorexpr_pybind.py, test_tensorexpr.py,
|
||||
#test_tensorboard.py, test_tensor_creation_ops.py, test_static_runtime.py,
|
||||
#test_spectral_ops.py, test_sort_and_select.py, test_shape_ops.py,
|
||||
#test_segment_reductions.py, test_reductions.py, test_pruning_op.py,
|
||||
#test_overrides.py, test_numpy_interop.py, test_numba_integration.py
|
||||
#test_nn.py, test_namedtensor.py, test_linalg.py, test_jit_cuda_fuser.py,
|
||||
#test_jit.py, test_indexing.py, test_datapipe.py, test_dataloader.py,
|
||||
#test_binary_ufuncs.py
|
||||
|
||||
#onnxruntime
|
||||
#Description: scoring engine for Open Neural Network Exchange (ONNX) models
|
||||
#Pinned versions: 1.9.0
|
||||
#test that import:
|
||||
|
||||
#pillow
|
||||
#Description: Python Imaging Library fork
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#protobuf
|
||||
#Description: Google’s data interchange format
|
||||
#Pinned versions:
|
||||
#test that import: test_tensorboard.py
|
||||
|
||||
psutil
|
||||
#Description: information on running processes and system utilization
|
||||
#Pinned versions:
|
||||
#test that import: test_profiler.py, test_openmp.py, test_dataloader.py
|
||||
|
||||
pytest
|
||||
#Description: testing framework
|
||||
#Pinned versions:
|
||||
#test that import: test_typing.py, test_cpp_extensions_aot.py, run_test.py
|
||||
|
||||
#pytest-benchmark
|
||||
#Description: fixture for benchmarking code
|
||||
#Pinned versions: 3.2.3
|
||||
#test that import:
|
||||
|
||||
#pytest-sugar
|
||||
#Description: shows failures and errors instantly
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#PyYAML
|
||||
#Description: data serialization format
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#requests
|
||||
#Description: HTTP library
|
||||
#Pinned versions:
|
||||
#test that import: test_type_promotion.py
|
||||
|
||||
#rich
|
||||
#Description: rich text and beautiful formatting in the terminal
|
||||
#Pinned versions: 10.9.0
|
||||
#test that import:
|
||||
|
||||
scikit-image
|
||||
#Description: image processing routines
|
||||
#Pinned versions:
|
||||
#test that import: test_nn.py
|
||||
|
||||
#scikit-learn
|
||||
#Description: machine learning package
|
||||
#Pinned versions: 0.20.3
|
||||
#test that import:
|
||||
|
||||
scipy==1.6.3
|
||||
# Pin SciPy because of failing distribution tests (see #60347)
|
||||
#Description: scientific python
|
||||
#Pinned versions: 1.6.3
|
||||
#test that import: test_unary_ufuncs.py, test_torch.py,test_tensor_creation_ops.py
|
||||
#test_spectral_ops.py, test_sparse_csr.py, test_reductions.py,test_nn.py
|
||||
#test_linalg.py, test_binary_ufuncs.py
|
||||
|
||||
#tabulate
|
||||
#Description: Pretty-print tabular data
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
tb-nightly
|
||||
#Description: TensorBoard
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#typing-extensions
|
||||
#Description: type hints for python
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
#virtualenv
|
||||
#Description: virtual environment for python
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
|
||||
unittest-xml-reporting<=3.2.0,>=2.0.0
|
||||
#Description: saves unit test results to xml
|
||||
#Pinned versions:
|
||||
#test that import:
|
||||
@ -1,11 +1,12 @@
|
||||
ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
ARG IMAGE_NAME
|
||||
ARG CUDNN_VERSION
|
||||
|
||||
FROM ${IMAGE_NAME}
|
||||
FROM nvidia/cuda:${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel-ubuntu${UBUNTU_VERSION}
|
||||
|
||||
ARG UBUNTU_VERSION
|
||||
ARG CUDA_VERSION
|
||||
ARG CUDNN_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
@ -23,13 +24,11 @@ ARG KATEX
|
||||
ADD ./common/install_katex.sh install_katex.sh
|
||||
RUN bash ./install_katex.sh && rm install_katex.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
# Install conda and other packages (e.g., numpy, coverage, pytest)
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ADD requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
ADD ./common/install_conda.sh install_conda.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh
|
||||
RUN rm /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
@ -76,7 +75,7 @@ RUN rm install_cmake.sh
|
||||
ADD ./common/install_cache.sh install_cache.sh
|
||||
ENV PATH /opt/cache/bin:$PATH
|
||||
RUN bash ./install_cache.sh && rm install_cache.sh
|
||||
ENV CMAKE_CUDA_COMPILER_LAUNCHER=/opt/cache/bin/sccache
|
||||
ENV CUDA_NVCC_EXECUTABLE=/opt/cache/lib/nvcc
|
||||
|
||||
# Add jni.h for java host build
|
||||
ADD ./common/install_jni.sh install_jni.sh
|
||||
@ -95,16 +94,9 @@ ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
|
||||
# AWS specific CUDA build guidance
|
||||
ENV TORCH_CUDA_ARCH_LIST Maxwell
|
||||
ENV TORCH_NVCC_FLAGS "-Xfatbin -compress-all"
|
||||
ENV CUDA_PATH /usr/local/cuda
|
||||
|
||||
# Install LLVM dev version (Defined in the pytorch/builder github repository)
|
||||
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
|
||||
|
||||
# Install CUDNN
|
||||
ARG CUDNN_VERSION
|
||||
ADD ./common/install_cudnn.sh install_cudnn.sh
|
||||
RUN if [ "${CUDNN_VERSION}" -eq 8 ]; then bash install_cudnn.sh; fi
|
||||
RUN rm install_cudnn.sh
|
||||
|
||||
USER jenkins
|
||||
CMD ["bash"]
|
||||
|
||||
@ -6,10 +6,6 @@ ARG UBUNTU_VERSION
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
# Set AMD gpu targets to build for
|
||||
ARG PYTORCH_ROCM_ARCH
|
||||
ENV PYTORCH_ROCM_ARCH ${PYTORCH_ROCM_ARCH}
|
||||
|
||||
# Install common dependencies (so that this step can be cached separately)
|
||||
ARG EC2
|
||||
ADD ./common/install_base.sh install_base.sh
|
||||
@ -25,13 +21,11 @@ RUN bash ./install_clang.sh && rm install_clang.sh
|
||||
ADD ./common/install_user.sh install_user.sh
|
||||
RUN bash ./install_user.sh && rm install_user.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
# Install conda and other packages (e.g., numpy, coverage, pytest)
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ADD requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
ADD ./common/install_conda.sh install_conda.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh
|
||||
RUN rm /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
|
||||
@ -33,13 +33,11 @@ ARG KATEX
|
||||
ADD ./common/install_katex.sh install_katex.sh
|
||||
RUN bash ./install_katex.sh && rm install_katex.sh
|
||||
|
||||
# Install conda and other packages (e.g., numpy, pytest)
|
||||
# Install conda and other packages (e.g., numpy, coverage, pytest)
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
ARG ANACONDA_PYTHON_VERSION
|
||||
ADD requirements-ci.txt /opt/conda/requirements-ci.txt
|
||||
ADD ./common/install_conda.sh install_conda.sh
|
||||
RUN bash ./install_conda.sh && rm install_conda.sh
|
||||
RUN rm /opt/conda/requirements-ci.txt
|
||||
|
||||
# Install gcc
|
||||
ARG GCC_VERSION
|
||||
|
||||
13  .circleci/ecr_gc_docker/Dockerfile  Normal file
@ -0,0 +1,13 @@
|
||||
FROM ubuntu:18.04
|
||||
|
||||
RUN apt-get update && apt-get install -y python3-pip git && rm -rf /var/lib/apt/lists/* /var/log/dpkg.log
|
||||
|
||||
ADD requirements.txt /requirements.txt
|
||||
|
||||
RUN pip3 install -r /requirements.txt
|
||||
|
||||
ADD gc.py /usr/bin/gc.py
|
||||
|
||||
ADD docker_hub.py /usr/bin/docker_hub.py
|
||||
|
||||
ENTRYPOINT ["/usr/bin/gc.py"]
|
||||
125  .circleci/ecr_gc_docker/docker_hub.py  Executable file
@ -0,0 +1,125 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
import boto3
|
||||
import requests
|
||||
import os
|
||||
|
||||
|
||||
IMAGE_INFO = namedtuple(
|
||||
"IMAGE_INFO", ("repo", "tag", "size", "last_updated_at", "last_updated_by")
|
||||
)
|
||||
|
||||
|
||||
def build_access_token(username, password):
|
||||
r = requests.post(
|
||||
"https://hub.docker.com/v2/users/login/",
|
||||
data={"username": username, "password": password},
|
||||
)
|
||||
r.raise_for_status()
|
||||
token = r.json().get("token")
|
||||
return {"Authorization": "JWT " + token}
|
||||
|
||||
|
||||
def list_repos(user, token):
|
||||
r = requests.get("https://hub.docker.com/v2/repositories/" + user, headers=token)
|
||||
r.raise_for_status()
|
||||
ret = sorted(
|
||||
repo["user"] + "/" + repo["name"] for repo in r.json().get("results", [])
|
||||
)
|
||||
if ret:
|
||||
print("repos found:")
|
||||
print("".join("\n\t" + r for r in ret))
|
||||
return ret
|
||||
|
||||
|
||||
def list_tags(repo, token):
|
||||
r = requests.get(
|
||||
"https://hub.docker.com/v2/repositories/" + repo + "/tags", headers=token
|
||||
)
|
||||
r.raise_for_status()
|
||||
return [
|
||||
IMAGE_INFO(
|
||||
repo=repo,
|
||||
tag=t["name"],
|
||||
size=t["full_size"],
|
||||
last_updated_at=t["last_updated"],
|
||||
last_updated_by=t["last_updater_username"],
|
||||
)
|
||||
for t in r.json().get("results", [])
|
||||
]
|
||||
|
||||
|
||||
def save_to_s3(tags):
|
||||
table_content = ""
|
||||
client = boto3.client("s3")
|
||||
for t in tags:
|
||||
table_content += (
|
||||
"<tr><td>{repo}</td><td>{tag}</td><td>{size}</td>"
|
||||
"<td>{last_updated_at}</td><td>{last_updated_by}</td></tr>"
|
||||
).format(
|
||||
repo=t.repo,
|
||||
tag=t.tag,
|
||||
size=t.size,
|
||||
last_updated_at=t.last_updated_at,
|
||||
last_updated_by=t.last_updated_by,
|
||||
)
|
||||
html_body = """
|
||||
<html>
|
||||
<head>
|
||||
<link rel="stylesheet"
|
||||
href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
|
||||
integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh"
|
||||
crossorigin="anonymous">
|
||||
<link rel="stylesheet" type="text/css"
|
||||
href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.css">
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js">
|
||||
</script>
|
||||
<script type="text/javascript" charset="utf8"
|
||||
src="https://cdn.datatables.net/1.10.20/js/jquery.dataTables.js"></script>
|
||||
<title> docker image info</title>
|
||||
</head>
|
||||
<body>
|
||||
<table class="table table-striped table-hover" id="docker">
|
||||
<caption>Docker images on docker hub</caption>
|
||||
<thead class="thead-dark">
|
||||
<tr>
|
||||
<th scope="col">repo</th>
|
||||
<th scope="col">tag</th>
|
||||
<th scope="col">size</th>
|
||||
<th scope="col">last_updated_at</th>
|
||||
<th scope="col">last_updated_by</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{table_content}
|
||||
</tbody>
|
||||
</table>
|
||||
</body>
|
||||
<script>
|
||||
$(document).ready( function () {{
|
||||
$('#docker').DataTable({{paging: false}});
|
||||
}} );
|
||||
</script>
|
||||
</html>
|
||||
""".format(
|
||||
table_content=table_content
|
||||
)
|
||||
client.put_object(
|
||||
Bucket="docker.pytorch.org",
|
||||
ACL="public-read",
|
||||
Key="docker_hub.html",
|
||||
Body=html_body,
|
||||
ContentType="text/html",
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
username = os.environ.get("DOCKER_HUB_USERNAME")
|
||||
password = os.environ.get("DOCKER_HUB_PASSWORD")
|
||||
token = build_access_token(username, password)
|
||||
tags = []
|
||||
for repo in list_repos("pytorch", token):
|
||||
tags.extend(list_tags(repo, token))
|
||||
save_to_s3(tags)
|
||||
218  .circleci/ecr_gc_docker/gc.py  Executable file
@@ -0,0 +1,218 @@
#!/usr/bin/env python3

import argparse
import boto3
import datetime
import pytz
import re
import sys


def save_to_s3(project, data):
    table_content = ""
    client = boto3.client("s3")
    for repo, tag, window, age, pushed in data:
        table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
            repo=repo, tag=tag, window=window, age=age, pushed=pushed
        )
    html_body = """
    <html>
      <head>
        <link rel="stylesheet"
              href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
              integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh"
              crossorigin="anonymous">
        <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.20/css/jquery.dataTables.css">
        <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
        <script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.20/js/jquery.dataTables.js"></script>
        <title>{project} nightly and permanent docker image info</title>
      </head>
      <body>
        <table class="table table-striped table-hover" id="docker">
          <thead class="thead-dark">
            <tr>
              <th scope="col">repo</th>
              <th scope="col">tag</th>
              <th scope="col">keep window</th>
              <th scope="col">age</th>
              <th scope="col">pushed at</th>
            </tr>
          </thead>
          <tbody>
            {table_content}
          </tbody>
        </table>
      </body>
      <script>
        $(document).ready( function () {{
          $('#docker').DataTable({{paging: false}});
        }} );
      </script>
    </html>
    """.format(
        project=project, table_content=table_content
    )

    # for pytorch, file can be found at
    # http://ossci-docker.s3-website.us-east-1.amazonaws.com/pytorch.html
    # and later one we can config docker.pytorch.org to point to the location

    client.put_object(
        Bucket="docker.pytorch.org",
        ACL="public-read",
        Key="{project}.html".format(project=project),
        Body=html_body,
        ContentType="text/html",
    )


def repos(client):
    paginator = client.get_paginator("describe_repositories")
    pages = paginator.paginate(registryId="308535385114")
    for page in pages:
        for repo in page["repositories"]:
            yield repo


def images(client, repository):
    paginator = client.get_paginator("describe_images")
    pages = paginator.paginate(
        registryId="308535385114", repositoryName=repository["repositoryName"]
    )
    for page in pages:
        for image in page["imageDetails"]:
            yield image


parser = argparse.ArgumentParser(description="Delete old Docker tags from registry")
parser.add_argument(
    "--dry-run", action="store_true", help="Dry run; print tags that would be deleted"
)
parser.add_argument(
    "--debug", action="store_true", help="Debug, print ignored / saved tags"
)
parser.add_argument(
    "--keep-stable-days",
    type=int,
    default=14,
    help="Days of stable Docker tags to keep (non per-build images)",
)
parser.add_argument(
    "--keep-unstable-days",
    type=int,
    default=1,
    help="Days of unstable Docker tags to keep (per-build images)",
)
parser.add_argument(
    "--filter-prefix",
    type=str,
    default="",
    help="Only run cleanup for repositories with this prefix",
)
parser.add_argument(
    "--ignore-tags",
    type=str,
    default="",
    help="Never cleanup these tags (comma separated)",
)
args = parser.parse_args()

if not args.ignore_tags or not args.filter_prefix:
    print(
        """
Missing required arguments --ignore-tags and --filter-prefix

You must specify --ignore-tags and --filter-prefix to avoid accidentally
pruning a stable Docker tag which is being actively used. This will
make you VERY SAD. So pay attention.

First, which filter-prefix do you want? The list of valid prefixes
is in jobs/private.groovy under the 'docker-registry-cleanup' job.
You probably want either pytorch or caffe2.

Second, which ignore-tags do you want? It should be whatever the most
up-to-date DockerVersion for the repository in question is. Follow
the imports of jobs/pytorch.groovy to find them.
"""
    )
    sys.exit(1)

client = boto3.client("ecr", region_name="us-east-1")
stable_window = datetime.timedelta(days=args.keep_stable_days)
unstable_window = datetime.timedelta(days=args.keep_unstable_days)
now = datetime.datetime.now(pytz.UTC)
ignore_tags = args.ignore_tags.split(",")


def chunks(chunkable, n):
    """ Yield successive n-sized chunks from l.
    """
    for i in range(0, len(chunkable), n):
        yield chunkable[i: i + n]


SHA_PATTERN = re.compile(r'^[0-9a-f]{40}$')


def looks_like_git_sha(tag):
    """Returns a boolean to check if a tag looks like a git sha

    For reference a sha1 is 40 characters with only 0-9a-f and contains no
    "-" characters
    """
    return re.match(SHA_PATTERN, tag) is not None


stable_window_tags = []
for repo in repos(client):
    repositoryName = repo["repositoryName"]
    if not repositoryName.startswith(args.filter_prefix):
        continue

    # Keep list of image digests to delete for this repository
    digest_to_delete = []

    for image in images(client, repo):
        tags = image.get("imageTags")
        if not isinstance(tags, (list,)) or len(tags) == 0:
            continue
        created = image["imagePushedAt"]
        age = now - created
        for tag in tags:
            if any([
                    looks_like_git_sha(tag),
                    tag.isdigit(),
                    tag.count("-") == 4,  # TODO: Remove, this no longer applies as tags are now built using a SHA1
                    tag in ignore_tags]):
                window = stable_window
                if tag in ignore_tags:
                    stable_window_tags.append((repositoryName, tag, "", age, created))
                elif age < window:
                    stable_window_tags.append((repositoryName, tag, window, age, created))
            else:
                window = unstable_window

            if tag in ignore_tags or age < window:
                if args.debug:
                    print("Ignoring {}:{} (age: {})".format(repositoryName, tag, age))
                break
        else:
            for tag in tags:
                print("{}Deleting {}:{} (age: {})".format("(dry run) " if args.dry_run else "", repositoryName, tag, age))
            digest_to_delete.append(image["imageDigest"])

    if args.dry_run:
        if args.debug:
            print("Skipping actual deletion, moving on...")
    else:
        # Issue batch delete for all images to delete for this repository
        # Note that as of 2018-07-25, the maximum number of images you can
        # delete in a single batch is 100, so chunk our list into batches of
        # 100
        for c in chunks(digest_to_delete, 100):
            client.batch_delete_image(
                registryId="308535385114",
                repositoryName=repositoryName,
                imageIds=[{"imageDigest": digest} for digest in c],
            )

save_to_s3(args.filter_prefix, stable_window_tags)
3  .circleci/ecr_gc_docker/requirements.txt  (Normal file)
@@ -0,0 +1,3 @@
boto3
pytz
requests
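A minimal sketch (not taken from the diff) of the batching pattern gc.py relies on: digests collected per repository are deleted in chunks of at most 100, matching the ECR batch-delete limit noted in the comments above. The repository name and digests below are hypothetical placeholders.

```python
import boto3


def delete_in_batches(client, repository_name, digests, batch_size=100):
    """Delete image digests from an ECR repository in batches of <= 100."""
    for start in range(0, len(digests), batch_size):
        batch = digests[start:start + batch_size]
        client.batch_delete_image(
            repositoryName=repository_name,
            imageIds=[{"imageDigest": digest} for digest in batch],
        )


# Hypothetical usage, mirroring the chunks() loop in gc.py:
# client = boto3.client("ecr", region_name="us-east-1")
# delete_in_batches(client, "pytorch/example-repo", ["sha256:..."])
```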
@@ -10,10 +10,18 @@ import shutil
import sys
from collections import namedtuple

import cimodel.data.binary_build_definitions as binary_build_definitions
import cimodel.data.pytorch_build_definitions as pytorch_build_definitions
import cimodel.data.simple.android_definitions
import cimodel.data.simple.binary_smoketest
import cimodel.data.simple.docker_definitions
import cimodel.data.simple.ios_definitions
import cimodel.data.simple.macos_definitions
import cimodel.data.simple.mobile_definitions
import cimodel.data.simple.nightly_android
import cimodel.data.simple.nightly_ios
import cimodel.data.simple.anaconda_prune_defintions
import cimodel.data.windows_build_definitions as windows_build_definitions
import cimodel.lib.miniutils as miniutils
import cimodel.lib.miniyaml as miniyaml

@@ -70,20 +78,20 @@ class Header(object):
        for line in filter(None, lines):
            output_filehandle.write(line + "\n")

def _for_all_items(items, functor) -> None:
    if isinstance(items, list):
        for item in items:
            _for_all_items(item, functor)
    if isinstance(items, dict) and len(items) == 1:
        item_type, item = next(iter(items.items()))
        functor(item_type, item)

def filter_master_only_jobs(items):
    def _is_main_or_master_item(item):
    def _for_all_items(items, functor) -> None:
        if isinstance(items, list):
            for item in items:
                _for_all_items(item, functor)
        if isinstance(items, dict) and len(items) == 1:
            item_type, item = next(iter(items.items()))
            functor(item_type, item)

    def _is_master_item(item):
        filters = item.get('filters', None)
        branches = filters.get('branches', None) if filters is not None else None
        branches_only = branches.get('only', None) if branches is not None else None
        return ('main' in branches_only or 'master' in branches_only) if branches_only is not None else False
        return 'master' in branches_only if branches_only is not None else False

    master_deps = set()

@@ -92,7 +100,7 @@ def filter_master_only_jobs(items):
        item_name = item.get("name", None)
        if not isinstance(requires, list):
            return
        if _is_main_or_master_item(item) or item_name in master_deps:
        if _is_master_item(item) or item_name in master_deps:
            master_deps.update([n.strip('"') for n in requires])

    def _do_filtering(items):
@@ -103,7 +111,7 @@ def filter_master_only_jobs(items):
            item_type, item = next(iter(items.items()))
            item_name = item.get("name", None)
            item_name = item_name.strip('"') if item_name is not None else None
            if not _is_main_or_master_item(item) and item_name not in master_deps:
            if not _is_master_item(item) and item_name not in master_deps:
                return None
            if 'filters' in item:
                item = item.copy()
@@ -111,55 +119,62 @@ def filter_master_only_jobs(items):
            return {item_type: item}

    # Scan of dependencies twice to pick up nested required jobs
    # I.e. jobs depending on jobs that main-only job depend on
    # I.e. jobs depending on jobs that master-only job depend on
    _for_all_items(items, _save_requires_if_master)
    _for_all_items(items, _save_requires_if_master)
    return _do_filtering(items)

def generate_required_docker_images(items):
    required_docker_images = set()

    def _requires_docker_image(item_type, item):
        requires = item.get('requires', None)
        if not isinstance(requires, list):
            return
        for requirement in requires:
            requirement = requirement.replace('"', '')
            if requirement.startswith('docker-'):
                required_docker_images.add(requirement)

    _for_all_items(items, _requires_docker_image)
    return required_docker_images

def gen_build_workflows_tree():
    build_workflows_functions = [
        cimodel.data.simple.docker_definitions.get_workflow_jobs,
        pytorch_build_definitions.get_workflow_jobs,
        cimodel.data.simple.macos_definitions.get_workflow_jobs,
        cimodel.data.simple.android_definitions.get_workflow_jobs,
        cimodel.data.simple.ios_definitions.get_workflow_jobs,
        cimodel.data.simple.mobile_definitions.get_workflow_jobs,
        cimodel.data.simple.binary_smoketest.get_workflow_jobs,
        cimodel.data.simple.nightly_ios.get_workflow_jobs,
        cimodel.data.simple.nightly_android.get_workflow_jobs,
        cimodel.data.simple.anaconda_prune_defintions.get_workflow_jobs,
        windows_build_definitions.get_windows_workflows,
        binary_build_definitions.get_post_upload_jobs,
        binary_build_definitions.get_binary_smoke_test_jobs,
    ]
    build_jobs = [f() for f in build_workflows_functions]
    build_jobs.extend(
        cimodel.data.simple.docker_definitions.get_workflow_jobs(
            # sort for consistency
            sorted(generate_required_docker_images(build_jobs))
        )
    )
    master_build_jobs = filter_master_only_jobs(build_jobs)

    rc = {
    binary_build_functions = [
        binary_build_definitions.get_binary_build_jobs,
        binary_build_definitions.get_nightly_tests,
        binary_build_definitions.get_nightly_uploads,
    ]

    slow_gradcheck_jobs = [
        pytorch_build_definitions.get_workflow_jobs,
        cimodel.data.simple.docker_definitions.get_workflow_jobs,
    ]

    return {
        "workflows": {
            "binary_builds": {
                "when": r"<< pipeline.parameters.run_binary_tests >>",
                "jobs": [f() for f in binary_build_functions],
            },
            "build": {
                "when": r"<< pipeline.parameters.run_build >>",
                "jobs": build_jobs,
            },
            "master_build": {
                "when": r"<< pipeline.parameters.run_master_build >>",
                "jobs": master_build_jobs,
            },
            "slow_gradcheck_build": {
                "when": r"<< pipeline.parameters.run_slow_gradcheck_build >>",
                "jobs": [f(only_slow_gradcheck=True) for f in slow_gradcheck_jobs],
            },
        }
    }
    if len(master_build_jobs) > 0:
        rc["workflows"]["master_build"] = {
            "when": r"<< pipeline.parameters.run_master_build >>",
            "jobs": master_build_jobs,
        }
    return rc


# Order of this list matters to the generated config.yml.
@@ -170,14 +185,20 @@ YAML_SOURCES = [
    Header("Build parameters"),
    File("build-parameters/pytorch-build-params.yml"),
    File("build-parameters/binary-build-params.yml"),
    File("build-parameters/promote-build-params.yml"),
    Header("Job specs"),
    File("job-specs/pytorch-job-specs.yml"),
    File("job-specs/binary-job-specs.yml"),
    File("job-specs/job-specs-custom.yml"),
    File("job-specs/job-specs-promote.yml"),
    File("job-specs/binary_update_htmls.yml"),
    File("job-specs/binary-build-tests.yml"),
    File("job-specs/docker_jobs.yml"),
    Header("Workflows"),
    Treegen(gen_build_workflows_tree, 0),
    File("workflows/workflows-scheduled-ci.yml"),
    File("workflows/workflows-ecr-gc.yml"),
    File("workflows/workflows-promote.yml"),
]
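For orientation, the workflow tree that filter_master_only_jobs walks is a list of single-key dicts ({job_type: job_spec}). A small self-contained sketch of that traversal follows; the job entries are made-up placeholders, only _for_all_items is copied from the diff above.

```python
def _for_all_items(items, functor) -> None:
    # Recurse into lists; apply functor to each single-key {type: spec} dict.
    if isinstance(items, list):
        for item in items:
            _for_all_items(item, functor)
    if isinstance(items, dict) and len(items) == 1:
        item_type, item = next(iter(items.items()))
        functor(item_type, item)


# Hypothetical items in the shape generate_config_yml.py produces:
jobs = [
    {"pytorch_linux_build": {"name": "build_a",
                             "filters": {"branches": {"only": ["master"]}}}},
    {"pytorch_linux_test": {"name": "test_a", "requires": ["build_a"]}},
]


def show(item_type, item):
    print(item_type, item.get("name"))


_for_all_items(jobs, show)  # visits both entries, printing type and name
```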
@@ -49,9 +49,8 @@ if [[ -n "${CIRCLE_PR_NUMBER:-}" ]]; then
  git reset --hard "$CIRCLE_SHA1"
elif [[ -n "${CIRCLE_SHA1:-}" ]]; then
  # Scheduled workflows & "smoke" binary build on master on PR merges
  DEFAULT_BRANCH="$(git remote show $CIRCLE_REPOSITORY_URL | awk '/HEAD branch/ {print $NF}')"
  git reset --hard "$CIRCLE_SHA1"
  git checkout -q -B $DEFAULT_BRANCH
  git checkout -q -B master
else
  echo "Can't tell what to checkout"
  exit 1
@@ -62,7 +61,7 @@ git --no-pager log --max-count 1
popd

# Clone the Builder master repo
retry git clone -q https://github.com/pytorch/builder.git -b release/1.12 "$BUILDER_ROOT"
retry git clone -q https://github.com/pytorch/builder.git -b release/1.10 "$BUILDER_ROOT"
pushd "$BUILDER_ROOT"
echo "Using builder from "
git --no-pager log --max-count 1
@@ -27,4 +27,4 @@ if ! [ -x "$(command -v xcodebuild)" ]; then
  exit 1
fi
PROFILE=PyTorch_CI_2022
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM} -c ${PROFILE} -t ${IOS_DEV_TEAM_ID}
ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM} -c ${PROFILE} -t ${IOS_DEV_TEAM_ID} -f Accelerate,MetalPerformanceShaders,CoreML

@@ -23,23 +23,14 @@ do
  fi
done
lipo -i ${ZIP_DIR}/install/lib/*.a
echo "BUILD_LITE_INTERPRETER: ${BUILD_LITE_INTERPRETER}"
# copy the umbrella header and license
if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then
  cp ${PROJ_ROOT}/ios/LibTorch-Lite.h ${ZIP_DIR}/src/
else
  cp ${PROJ_ROOT}/ios/LibTorch.h ${ZIP_DIR}/src/
fi
cp ${PROJ_ROOT}/ios/LibTorch-Lite.h ${ZIP_DIR}/src/
cp ${PROJ_ROOT}/LICENSE ${ZIP_DIR}/
# zip the library
export DATE="$(date -u +%Y%m%d)"
export IOS_NIGHTLY_BUILD_VERSION="1.12.0.${DATE}"
if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then
  # libtorch_lite_ios_nightly_1.11.0.20210810.zip
  ZIPFILE="libtorch_lite_ios_nightly_${IOS_NIGHTLY_BUILD_VERSION}.zip"
else
  ZIPFILE="libtorch_ios_nightly_build.zip"
fi
export IOS_NIGHTLY_BUILD_VERSION="1.10.0.${DATE}"
# libtorch_lite_ios_nightly_1.10.0.20210810.zip
ZIPFILE="libtorch_lite_ios_nightly_${IOS_NIGHTLY_BUILD_VERSION}.zip"
cd ${ZIP_DIR}
#for testing
touch version.txt
@@ -61,15 +52,13 @@ set +x
# echo "AWS SECRET: ${AWS_SECRET_ACCESS_KEY}"
aws s3 cp ${ZIPFILE} s3://ossci-ios-build/ --acl public-read

if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then
  # create a new LibTorch-Lite-Nightly.podspec from the template
  echo "cp ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec.template ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec"
  cp ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec.template ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec
# create a new LibTorch-Lite-Nightly.podspec from the template
echo "cp ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec.template ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec"
cp ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec.template ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec

  # update pod version
  sed -i '' -e "s/IOS_NIGHTLY_BUILD_VERSION/${IOS_NIGHTLY_BUILD_VERSION}/g" ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec
  cat ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec
# update pod version
sed -i '' -e "s/IOS_NIGHTLY_BUILD_VERSION/${IOS_NIGHTLY_BUILD_VERSION}/g" ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec
cat ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec

  # push the new LibTorch-Lite-Nightly.podspec to CocoaPods
  pod trunk push --verbose --allow-warnings --use-libraries --skip-import-validation ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec
fi
# push the new LibTorch-Lite-Nightly.podspec to CocoaPods
pod trunk push --verbose --allow-warnings --use-libraries --skip-import-validation ${PROJ_ROOT}/ios/LibTorch-Lite-Nightly.podspec
@@ -11,7 +11,7 @@ NUM_CPUS=$(( $(nproc) - 2 ))
# Defaults here for **binary** linux builds so they can be changed in one place
export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}

if [[ "${DESIRED_CUDA}" =~ cu11[0-9] ]]; then
if [[ "${DESIRED_CUDA}" == "cu111" || "${DESIRED_CUDA}" == "cu113" ]]; then
  export BUILD_SPLIT_CUDA="ON"
fi

@@ -26,7 +26,7 @@ else
  build_script='manywheel/build.sh'
fi

if [[ "$CIRCLE_BRANCH" == "main" ]] || [[ "$CIRCLE_BRANCH" == "master" ]] || [[ "$CIRCLE_BRANCH" == release/* ]]; then
if [[ "$CIRCLE_BRANCH" == "master" ]] || [[ "$CIRCLE_BRANCH" == release/* ]]; then
  export BUILD_DEBUG_INFO=1
fi
@@ -1,24 +1,10 @@
#!/bin/bash

OUTPUT_SCRIPT=${OUTPUT_SCRIPT:-/home/circleci/project/ci_test_script.sh}

# only source if file exists
if [[ -f /home/circleci/project/env ]]; then
  source /home/circleci/project/env
fi
cat >"${OUTPUT_SCRIPT}" <<EOL
source /home/circleci/project/env
cat >/home/circleci/project/ci_test_script.sh <<EOL
# =================== The following code will be executed inside Docker container ===================
set -eux -o pipefail

retry () {
    "\$@" || (sleep 1 && "\$@") || (sleep 2 && "\$@")
}

# Source binary env file here if exists
if [[ -e "${BINARY_ENV_FILE:-/nofile}" ]]; then
  source "${BINARY_ENV_FILE:-/nofile}"
fi

python_nodot="\$(echo $DESIRED_PYTHON | tr -d m.u)"

# Set up Python
@@ -37,23 +23,14 @@ fi

EXTRA_CONDA_FLAGS=""
NUMPY_PIN=""
PROTOBUF_PACKAGE="defaults::protobuf"
if [[ "\$python_nodot" = *310* ]]; then
  EXTRA_CONDA_FLAGS="-c=conda-forge"
  # There's an issue with conda channel priority where it'll randomly pick 1.19 over 1.20
  # we set a lower boundary here just to be safe
  NUMPY_PIN=">=1.21.2"
  PROTOBUF_PACKAGE="protobuf>=3.19.0"
fi

if [[ "\$python_nodot" = *39* ]]; then
if [[ "\$python_nodot" = *39* ]]; then
  EXTRA_CONDA_FLAGS="-c=conda-forge"
  # There's an issue with conda channel priority where it'll randomly pick 1.19 over 1.20
  # we set a lower boundary here just to be safe
  NUMPY_PIN=">=1.20"
fi

if [[ "$DESIRED_CUDA" == "cu116" ]]; then
if [[ "$DESIRED_CUDA" == "cu112" ]]; then
  EXTRA_CONDA_FLAGS="-c=conda-forge"
fi

@@ -67,8 +44,7 @@ mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to m
# TODO there is duplicated and inconsistent test-python-env setup across this
# file, builder/smoke_test.sh, and builder/run_tests.sh, and also in the
# conda build scripts themselves. These should really be consolidated
# Pick only one package of multiple available (which happens as result of workflow re-runs)
pkg="/final_pkgs/\$(ls -1 /final_pkgs|sort|tail -1)"
pkg="/final_pkgs/\$(ls /final_pkgs)"
if [[ "$PACKAGE_TYPE" == conda ]]; then
  (
    # For some reason conda likes to re-activate the conda environment when attempting this install
@@ -83,7 +59,7 @@ if [[ "$PACKAGE_TYPE" == conda ]]; then
      ninja \
      dataclasses \
      typing-extensions \
      ${PROTOBUF_PACKAGE} \
      defaults::protobuf \
      six
  if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
    retry conda install -c pytorch -y cpuonly
@@ -116,4 +92,4 @@ EOL
echo
echo
echo "The script that will run in the next step is:"
cat "${OUTPUT_SCRIPT}"
cat /home/circleci/project/ci_test_script.sh
@@ -1,19 +1,28 @@
#!/bin/bash
set -eux -o pipefail

source "${BINARY_ENV_FILE:-/Users/distiller/project/env}"
source "/Users/distiller/project/env"
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"

if [[ -z "${IS_GHA:-}" ]]; then
  export PATH="${workdir:-${HOME}}/miniconda/bin:${PATH}"
fi
# For some reason `unbuffer` breaks if we change the PATH here, so we
# write a script with the PATH change in it and unbuffer the whole
# thing
build_script="$workdir/build_script.sh"
touch "$build_script"
chmod +x "$build_script"

# Build
export USE_PYTORCH_METAL_EXPORT=1
export USE_COREML_DELEGATE=1
cat >"$build_script" <<EOL
export PATH="$workdir/miniconda/bin:$PATH"
if [[ "$CIRCLE_BRANCH" == "nightly" ]]; then
  export USE_PYTORCH_METAL_EXPORT=1
  export USE_COREML_DELEGATE=1
fi
if [[ "$PACKAGE_TYPE" == conda ]]; then
  "${BUILDER_ROOT}/conda/build_pytorch.sh"
  "$workdir/builder/conda/build_pytorch.sh"
else
  export TORCH_PACKAGE_NAME="$(echo $TORCH_PACKAGE_NAME | tr '-' '_')"
  "${BUILDER_ROOT}/wheel/build_wheel.sh"
  "$workdir/builder/wheel/build_wheel.sh"
fi
EOL
unbuffer "$build_script" | ts
@@ -5,70 +5,53 @@ export TZ=UTC
tagged_version() {
  # Grabs version from either the env variable CIRCLE_TAG
  # or the pytorch git described version
  if [[ "$OSTYPE" == "msys" && -z "${IS_GHA:-}" ]]; then
    GIT_DIR="${workdir}/p/.git"
  if [[ "$OSTYPE" == "msys" ]]; then
    GIT_DESCRIBE="git --git-dir ${workdir}/p/.git describe"
  else
    GIT_DIR="${workdir}/pytorch/.git"
    GIT_DESCRIBE="git --git-dir ${workdir}/pytorch/.git describe"
  fi
  GIT_DESCRIBE="git --git-dir ${GIT_DIR} describe --tags --match v[0-9]*.[0-9]*.[0-9]*"
  if [[ -n "${CIRCLE_TAG:-}" ]]; then
    echo "${CIRCLE_TAG}"
  elif [[ ! -d "${GIT_DIR}" ]]; then
    echo "Abort, abort! Git dir ${GIT_DIR} does not exists!"
    kill $$
  elif ${GIT_DESCRIBE} --exact >/dev/null; then
    ${GIT_DESCRIBE}
  elif ${GIT_DESCRIBE} --exact --tags >/dev/null; then
    ${GIT_DESCRIBE} --tags
  else
    return 1
  fi
}

# These are only relevant for CircleCI
# TODO: Remove these later once migrated fully to GHA
if [[ -z ${IS_GHA:-} ]]; then
  # We need to write an envfile to persist these variables to following
  # steps, but the location of the envfile depends on the circleci executor
  if [[ "$(uname)" == Darwin ]]; then
    # macos executor (builds and tests)
    workdir="/Users/distiller/project"
  elif [[ "$OSTYPE" == "msys" ]]; then
    # windows executor (builds and tests)
    workdir="/c/w"
  elif [[ -d "/home/circleci/project" ]]; then
    # machine executor (binary tests)
    workdir="/home/circleci/project"
  else
    # docker executor (binary builds)
    workdir="/"
  fi
  envfile="$workdir/env"
  touch "$envfile"
  chmod +x "$envfile"
# We need to write an envfile to persist these variables to following
# steps, but the location of the envfile depends on the circleci executor
if [[ "$(uname)" == Darwin ]]; then
  # macos executor (builds and tests)
  workdir="/Users/distiller/project"
elif [[ "$OSTYPE" == "msys" ]]; then
  # windows executor (builds and tests)
  workdir="/c/w"
elif [[ -d "/home/circleci/project" ]]; then
  # machine executor (binary tests)
  workdir="/home/circleci/project"
else
  # docker executor (binary builds)
  workdir="/"
fi
envfile="$workdir/env"
touch "$envfile"
chmod +x "$envfile"

  # Parse the BUILD_ENVIRONMENT to package type, python, and cuda
  configs=($BUILD_ENVIRONMENT)
  export PACKAGE_TYPE="${configs[0]}"
  export DESIRED_PYTHON="${configs[1]}"
  export DESIRED_CUDA="${configs[2]}"
  if [[ "${OSTYPE}" == "msys" ]]; then
    export DESIRED_DEVTOOLSET=""
    export LIBTORCH_CONFIG="${configs[3]:-}"
    if [[ "$LIBTORCH_CONFIG" == 'debug' ]]; then
      export DEBUG=1
    fi
  else
    export DESIRED_DEVTOOLSET="${configs[3]:-}"
# Parse the BUILD_ENVIRONMENT to package type, python, and cuda
configs=($BUILD_ENVIRONMENT)
export PACKAGE_TYPE="${configs[0]}"
export DESIRED_PYTHON="${configs[1]}"
export DESIRED_CUDA="${configs[2]}"
if [[ "${BUILD_FOR_SYSTEM:-}" == "windows" ]]; then
  export DESIRED_DEVTOOLSET=""
  export LIBTORCH_CONFIG="${configs[3]:-}"
  if [[ "$LIBTORCH_CONFIG" == 'debug' ]]; then
    export DEBUG=1
  fi
else
  envfile=${BINARY_ENV_FILE:-/tmp/env}
  if [[ -n "${PYTORCH_ROOT}" ]]; then
    workdir=$(dirname "${PYTORCH_ROOT}")
  else
    # docker executor (binary builds)
    workdir="/"
  fi
  export DESIRED_DEVTOOLSET="${configs[3]:-}"
fi

if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
  export BUILD_PYTHONLESS=1
fi
@@ -91,13 +74,18 @@ if [[ ${DESIRED_CUDA} == "cpu" ]]; then
  USE_GOLD_LINKER="ON"
fi

USE_WHOLE_CUDNN="OFF"
# Link whole cuDNN for CUDA-11.1 to include fp16 fast kernels
if [[ "$(uname)" == "Linux" && "${DESIRED_CUDA}" == "cu111" ]]; then
  USE_WHOLE_CUDNN="ON"
fi

# Default to nightly, since that's where this normally uploads to
PIP_UPLOAD_FOLDER='nightly/'
# We put this here so that OVERRIDE_PACKAGE_VERSION below can read from it
export DATE="$(date -u +%Y%m%d)"
#TODO: We should be pulling semver version from the base version.txt
BASE_BUILD_VERSION="1.12.0.dev$DATE"
BASE_BUILD_VERSION="1.10.0.dev$DATE"
# Change BASE_BUILD_VERSION to git tag when on a git tag
# Use 'git -C' to make doubly sure we're in the correct directory for checking
# the git tag
@@ -143,28 +131,24 @@ if [[ "$PACKAGE_TYPE" == libtorch ]]; then
  fi
fi

cat >"$envfile" <<EOL
cat >>"$envfile" <<EOL
# =================== The following code will be executed inside Docker container ===================
export TZ=UTC
echo "Running on $(uname -a) at $(date)"

export PACKAGE_TYPE="$PACKAGE_TYPE"
export DESIRED_PYTHON="${DESIRED_PYTHON:-}"
export DESIRED_PYTHON="$DESIRED_PYTHON"
export DESIRED_CUDA="$DESIRED_CUDA"
export LIBTORCH_VARIANT="${LIBTORCH_VARIANT:-}"
export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}"
if [[ "${OSTYPE}" == "msys" ]]; then
  export DESIRED_DEVTOOLSET="$DESIRED_DEVTOOLSET"
if [[ "${BUILD_FOR_SYSTEM:-}" == "windows" ]]; then
  export LIBTORCH_CONFIG="${LIBTORCH_CONFIG:-}"
  if [[ "${LIBTORCH_CONFIG:-}" == 'debug' ]]; then
    export DEBUG=1
  fi
  export DESIRED_DEVTOOLSET=""
else
  export DESIRED_DEVTOOLSET="${DESIRED_DEVTOOLSET:-}"
  export DEBUG="${DEBUG:-}"
fi

export DATE="$DATE"
export NIGHTLIES_DATE_PREAMBLE=1.12.0.dev
export NIGHTLIES_DATE_PREAMBLE=1.10.0.dev
export PYTORCH_BUILD_VERSION="$PYTORCH_BUILD_VERSION"
export PYTORCH_BUILD_NUMBER="$PYTORCH_BUILD_NUMBER"
export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
@@ -172,7 +156,6 @@ export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
# TODO: We don't need this anymore IIUC
export TORCH_PACKAGE_NAME='torch'
export TORCH_CONDA_BUILD_FOLDER='pytorch-nightly'
export ANACONDA_USER='pytorch'

export USE_FBGEMM=1
export JAVA_HOME=$JAVA_HOME
@@ -180,48 +163,30 @@ export BUILD_JNI=$BUILD_JNI
export PIP_UPLOAD_FOLDER="$PIP_UPLOAD_FOLDER"
export DOCKER_IMAGE="$DOCKER_IMAGE"

export workdir="$workdir"
export MAC_PACKAGE_WORK_DIR="$workdir"
if [[ "$OSTYPE" == "msys" ]]; then
  export PYTORCH_ROOT="$workdir/p"
  export BUILDER_ROOT="$workdir/b"
else
  export PYTORCH_ROOT="$workdir/pytorch"
  export BUILDER_ROOT="$workdir/builder"
fi
export MINICONDA_ROOT="$workdir/miniconda"
export PYTORCH_FINAL_PACKAGE_DIR="$workdir/final_pkgs"

export CIRCLE_TAG="${CIRCLE_TAG:-}"
export CIRCLE_SHA1="$CIRCLE_SHA1"
export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}"
export CIRCLE_BRANCH="$CIRCLE_BRANCH"
export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"

export USE_GOLD_LINKER="${USE_GOLD_LINKER}"
export USE_GLOO_WITH_OPENSSL="ON"
export USE_WHOLE_CUDNN="${USE_WHOLE_CUDNN}"
# =================== The above code will be executed inside Docker container ===================
EOL

# nproc doesn't exist on darwin
if [[ "$(uname)" != Darwin ]]; then
  # Because most Circle executors only have 20 CPUs, using more causes OOMs w/ Ninja and nvcc parallelization
  MEMORY_LIMIT_MAX_JOBS=18
  NUM_CPUS=$(( $(nproc) - 2 ))

  # Defaults here for **binary** linux builds so they can be changed in one place
  export MAX_JOBS=${MAX_JOBS:-$(( ${NUM_CPUS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${NUM_CPUS} ))}

  cat >>"$envfile" <<EOL
export MAX_JOBS="${MAX_JOBS}"
EOL
fi

if [[ -z "${IS_GHA:-}" ]]; then
  cat >>"$envfile" <<EOL
export workdir="$workdir"
export MAC_PACKAGE_WORK_DIR="$workdir"
if [[ "$OSTYPE" == "msys" ]]; then
  export PYTORCH_ROOT="$workdir/p"
  export BUILDER_ROOT="$workdir/b"
else
  export PYTORCH_ROOT="$workdir/pytorch"
  export BUILDER_ROOT="$workdir/builder"
fi
export MINICONDA_ROOT="$workdir/miniconda"
export PYTORCH_FINAL_PACKAGE_DIR="$workdir/final_pkgs"

export CIRCLE_TAG="${CIRCLE_TAG:-}"
export CIRCLE_SHA1="$CIRCLE_SHA1"
export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}"
export CIRCLE_BRANCH="$CIRCLE_BRANCH"
export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
EOL
fi

echo 'retry () {' >> "$envfile"
echo ' $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)' >> "$envfile"
echo '}' >> "$envfile"
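The MAX_JOBS arithmetic above is just a min() of the available CPUs (minus two) and the memory-derived cap of 18. A hedged Python restatement of the same computation, for readers less used to bash ternaries; the helper name is made up for illustration.

```python
import os

MEMORY_LIMIT_MAX_JOBS = 18  # cap taken from the script above (larger values OOM on Circle executors)


def max_jobs(num_cpus=None):
    """Mirror of the bash expression: min(nproc - 2, MEMORY_LIMIT_MAX_JOBS)."""
    if num_cpus is None:
        num_cpus = os.cpu_count() or 1
    return min(num_cpus - 2, MEMORY_LIMIT_MAX_JOBS)


# e.g. a 20-CPU executor builds with 18 parallel jobs, an 8-CPU machine with 6
```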
@ -63,10 +63,6 @@ s3_upload() {
|
||||
)
|
||||
}
|
||||
|
||||
# Install dependencies (should be a no-op if previously installed)
|
||||
conda install -yq anaconda-client
|
||||
pip install -q awscli
|
||||
|
||||
case "${PACKAGE_TYPE}" in
|
||||
conda)
|
||||
conda_upload
|
||||
|
||||
@ -1,23 +1,21 @@
|
||||
#!/bin/bash
|
||||
set -eux -o pipefail
|
||||
|
||||
source "${BINARY_ENV_FILE:-/c/w/env}"
|
||||
source "/c/w/env"
|
||||
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR"
|
||||
|
||||
export CUDA_VERSION="${DESIRED_CUDA/cu/}"
|
||||
export USE_SCCACHE=1
|
||||
export SCCACHE_BUCKET=ossci-compiler-cache-windows
|
||||
export SCCACHE_IGNORE_SERVER_IO_ERROR=1
|
||||
export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"
|
||||
export VC_YEAR=2019
|
||||
|
||||
if [[ "${DESIRED_CUDA}" == *"cu11"* ]]; then
|
||||
export BUILD_SPLIT_CUDA=ON
|
||||
if [[ "${DESIRED_CUDA}" == "cu111" || "${DESIRED_CUDA}" == "cu113" ]]; then
|
||||
export BUILD_SPLIT_CUDA="ON"
|
||||
fi
|
||||
|
||||
|
||||
echo "Free Space for CUDA DEBUG BUILD"
|
||||
if [[ "${CIRCLECI:-}" == 'true' ]]; then
|
||||
export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"
|
||||
if [[ "$CIRCLECI" == 'true' ]]; then
|
||||
if [[ -d "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community" ]]; then
|
||||
rm -rf "C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community"
|
||||
fi
|
||||
@ -49,20 +47,23 @@ if [[ "${CIRCLECI:-}" == 'true' ]]; then
|
||||
if [[ -d "C:\\Program Files (x86)\\Google" ]]; then
|
||||
rm -rf "C:\\Program Files (x86)\\Google"
|
||||
fi
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}
|
||||
set -x
|
||||
if [[ -d "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances" ]]; then
|
||||
mv "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances" .
|
||||
rm -rf "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
|
||||
mkdir -p "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
|
||||
mv _Instances "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
|
||||
fi
|
||||
if [[ -d "C:\\Microsoft" ]]; then
|
||||
# don't use quotes here
|
||||
rm -rf /c/Microsoft/AndroidNDK*
|
||||
fi
|
||||
fi
|
||||
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}
|
||||
set -x
|
||||
|
||||
if [[ "$CIRCLECI" == 'true' && -d "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances" ]]; then
|
||||
mv "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances" .
|
||||
rm -rf "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
|
||||
mkdir -p "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
|
||||
mv _Instances "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
|
||||
fi
|
||||
|
||||
if [[ "$CIRCLECI" == 'true' && -d "C:\\Microsoft" ]]; then
|
||||
# don't use quotes here
|
||||
rm -rf /c/Microsoft/AndroidNDK*
|
||||
fi
|
||||
|
||||
echo "Free space on filesystem before build:"
|
||||
@ -70,10 +71,9 @@ df -h
|
||||
|
||||
pushd "$BUILDER_ROOT"
|
||||
if [[ "$PACKAGE_TYPE" == 'conda' ]]; then
|
||||
./windows/internal/build_conda.bat
|
||||
./windows/internal/build_conda.bat
|
||||
elif [[ "$PACKAGE_TYPE" == 'wheel' || "$PACKAGE_TYPE" == 'libtorch' ]]; then
|
||||
export NIGHTLIES_PYTORCH_ROOT="$PYTORCH_ROOT"
|
||||
./windows/internal/build_wheels.bat
|
||||
./windows/internal/build_wheels.bat
|
||||
fi
|
||||
|
||||
echo "Free space on filesystem after build:"
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
set -eux -o pipefail
|
||||
|
||||
source "${BINARY_ENV_FILE:-/c/w/env}"
|
||||
source "/c/w/env"
|
||||
|
||||
export CUDA_VERSION="${DESIRED_CUDA/cu/}"
|
||||
export VC_YEAR=2019
|
||||
|
||||
@ -34,9 +34,9 @@ echo "error: cpp_doc_push_script.sh: install_path (arg1) not specified"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
is_main_doc=false
|
||||
is_master_doc=false
|
||||
if [ "$version" == "master" ]; then
|
||||
is_main_doc=true
|
||||
is_master_doc=true
|
||||
fi
|
||||
|
||||
echo "install_path: $install_path version: $version"
|
||||
@ -56,7 +56,7 @@ sudo apt-get -y install doxygen
|
||||
# Generate ATen files
|
||||
pushd "${pt_checkout}"
|
||||
pip install -r requirements.txt
|
||||
time python -m torchgen.gen \
|
||||
time python -m tools.codegen.gen \
|
||||
-s aten/src/ATen \
|
||||
-d build/aten/src/ATen
|
||||
|
||||
@ -65,8 +65,9 @@ cp torch/_utils_internal.py tools/shared
|
||||
|
||||
# Generate PyTorch files
|
||||
time python tools/setup_helpers/generate_code.py \
|
||||
--declarations-path build/aten/src/ATen/Declarations.yaml \
|
||||
--native-functions-path aten/src/ATen/native/native_functions.yaml \
|
||||
--tags-path aten/src/ATen/native/tags.yaml
|
||||
--nn-path aten/src/
|
||||
|
||||
# Build the docs
|
||||
pushd docs/cpp
|
||||
@ -96,12 +97,8 @@ git status
|
||||
git config user.email "soumith+bot@pytorch.org"
|
||||
git config user.name "pytorchbot"
|
||||
# If there aren't changes, don't make a commit; push is no-op
|
||||
git commit -m "Generate C++ docs from pytorch/pytorch@${GITHUB_SHA}" || true
|
||||
git commit -m "Generate C++ docs from pytorch/pytorch@$CIRCLE_SHA1" || true
|
||||
git status
|
||||
|
||||
if [[ "${WITH_PUSH:-}" == true ]]; then
|
||||
git push -u origin
|
||||
fi
|
||||
|
||||
popd
|
||||
# =================== The above code **should** be executed inside Docker container ===================
|
||||
|
||||
@ -37,9 +37,9 @@ echo "error: python_doc_push_script.sh: install_path (arg1) not specified"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
is_main_doc=false
|
||||
is_master_doc=false
|
||||
if [ "$version" == "master" ]; then
|
||||
is_main_doc=true
|
||||
is_master_doc=true
|
||||
fi
|
||||
|
||||
# Argument 3: The branch to push to. Usually is "site"
|
||||
@ -86,7 +86,7 @@ pushd docs
|
||||
|
||||
# Build the docs
|
||||
pip -q install -r requirements.txt
|
||||
if [ "$is_main_doc" = true ]; then
|
||||
if [ "$is_master_doc" = true ]; then
|
||||
build_docs html
|
||||
[ $? -eq 0 ] || exit $?
|
||||
make coverage
|
||||
@ -131,12 +131,8 @@ git status
|
||||
git config user.email "soumith+bot@pytorch.org"
|
||||
git config user.name "pytorchbot"
|
||||
# If there aren't changes, don't make a commit; push is no-op
|
||||
git commit -m "Generate Python docs from pytorch/pytorch@${GITHUB_SHA}" || true
|
||||
git commit -m "Generate Python docs from pytorch/pytorch@$CIRCLE_SHA1" || true
|
||||
git status
|
||||
|
||||
if [[ "${WITH_PUSH:-}" == true ]]; then
|
||||
git push -u origin "${branch}"
|
||||
fi
|
||||
|
||||
popd
|
||||
# =================== The above code **should** be executed inside Docker container ===================
|
||||
|
||||
@ -32,7 +32,7 @@ if ! command -v aws >/dev/null; then
|
||||
fi
|
||||
|
||||
if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
|
||||
DRIVER_FN="NVIDIA-Linux-x86_64-510.60.02.run"
|
||||
DRIVER_FN="NVIDIA-Linux-x86_64-460.39.run"
|
||||
wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
|
||||
sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
|
||||
nvidia-smi
|
||||
|
||||
@ -11,7 +11,7 @@ AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/"
|
||||
AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRET", "")
|
||||
PIPELINE_ID = "911"
|
||||
PROJECT_ID = "0628bce4-2d33-499e-bac5-530e12db160f"
|
||||
TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "main")
|
||||
TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "master")
|
||||
TARGET_COMMIT = os.environ.get("CIRCLE_SHA1", "")
|
||||
|
||||
build_base_url = AZURE_PIPELINE_BASE_URL + "_apis/build/builds?api-version=6.0"
|
||||
|
||||
@ -2,18 +2,22 @@
|
||||
set -eux -o pipefail
|
||||
|
||||
case ${CUDA_VERSION} in
|
||||
10.1)
|
||||
cuda_installer_name="cuda_10.1.243_426.00_win10"
|
||||
cuda_install_packages="nvcc_10.1 cuobjdump_10.1 nvprune_10.1 cupti_10.1 cublas_10.1 cublas_dev_10.1 cudart_10.1 cufft_10.1 cufft_dev_10.1 curand_10.1 curand_dev_10.1 cusolver_10.1 cusolver_dev_10.1 cusparse_10.1 cusparse_dev_10.1 nvgraph_10.1 nvgraph_dev_10.1 npp_10.1 npp_dev_10.1 nvrtc_10.1 nvrtc_dev_10.1 nvml_dev_10.1"
|
||||
;;
|
||||
10.2)
|
||||
cuda_installer_name="cuda_10.2.89_441.22_win10"
|
||||
cuda_install_packages="nvcc_10.2 cuobjdump_10.2 nvprune_10.2 cupti_10.2 cublas_10.2 cublas_dev_10.2 cudart_10.2 cufft_10.2 cufft_dev_10.2 curand_10.2 curand_dev_10.2 cusolver_10.2 cusolver_dev_10.2 cusparse_10.2 cusparse_dev_10.2 nvgraph_10.2 nvgraph_dev_10.2 npp_10.2 npp_dev_10.2 nvrtc_10.2 nvrtc_dev_10.2 nvml_dev_10.2"
|
||||
;;
|
||||
11.1)
|
||||
cuda_installer_name="cuda_11.1.0_456.43_win10"
|
||||
cuda_install_packages="nvcc_11.1 cuobjdump_11.1 nvprune_11.1 nvprof_11.1 cupti_11.1 cublas_11.1 cublas_dev_11.1 cudart_11.1 cufft_11.1 cufft_dev_11.1 curand_11.1 curand_dev_11.1 cusolver_11.1 cusolver_dev_11.1 cusparse_11.1 cusparse_dev_11.1 npp_11.1 npp_dev_11.1 nvrtc_11.1 nvrtc_dev_11.1 nvml_dev_11.1"
|
||||
;;
|
||||
11.3)
|
||||
cuda_installer_name="cuda_11.3.0_465.89_win10"
|
||||
cuda_install_packages="thrust_11.3 nvcc_11.3 cuobjdump_11.3 nvprune_11.3 nvprof_11.3 cupti_11.3 cublas_11.3 cublas_dev_11.3 cudart_11.3 cufft_11.3 cufft_dev_11.3 curand_11.3 curand_dev_11.3 cusolver_11.3 cusolver_dev_11.3 cusparse_11.3 cusparse_dev_11.3 npp_11.3 npp_dev_11.3 nvrtc_11.3 nvrtc_dev_11.3 nvml_dev_11.3"
|
||||
;;
|
||||
11.6)
|
||||
cuda_installer_name="cuda_11.6.0_511.23_windows"
|
||||
cuda_install_packages="thrust_11.6 nvcc_11.6 cuobjdump_11.6 nvprune_11.6 nvprof_11.6 cupti_11.6 cublas_11.6 cublas_dev_11.6 cudart_11.6 cufft_11.6 cufft_dev_11.6 curand_11.6 curand_dev_11.6 cusolver_11.6 cusolver_dev_11.6 cusparse_11.6 cusparse_dev_11.6 npp_11.6 npp_dev_11.6 nvrtc_11.6 nvrtc_dev_11.6 nvml_dev_11.6"
|
||||
;;
|
||||
*)
|
||||
echo "CUDA_VERSION $CUDA_VERSION is not supported yet"
|
||||
exit 1
|
||||
|
||||
@ -1,20 +1,23 @@
|
||||
#!/bin/bash
|
||||
set -eux -o pipefail
|
||||
|
||||
|
||||
windows_s3_link="https://ossci-windows.s3.amazonaws.com"
|
||||
# This is typically blank but for CUDA 10* it'll be set to 10
|
||||
windows_version_qualifier=""
|
||||
|
||||
case ${CUDA_VERSION} in
|
||||
10.1)
|
||||
archive_version="v7.6.4.38"
|
||||
windows_version_qualifier="10"
|
||||
;;
|
||||
10.2)
|
||||
cudnn_file_name="cudnn-${CUDA_VERSION}-windows10-x64-v7.6.5.32"
|
||||
archive_version="v7.6.5.32"
|
||||
windows_version_qualifier="10"
|
||||
;;
|
||||
11.1)
|
||||
archive_version="v8.0.5.39"
|
||||
;;
|
||||
11.3)
|
||||
# Use cudnn8.3 with hard-coded cuda11.3 version
|
||||
cudnn_file_name="cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive"
|
||||
;;
|
||||
11.6)
|
||||
# Use cudnn8.3 with hard-coded cuda11.5 version
|
||||
cudnn_file_name="cudnn-windows-x86_64-8.3.2.44_cuda11.5-archive"
|
||||
archive_version="v8.2.0.53"
|
||||
;;
|
||||
*)
|
||||
echo "CUDA_VERSION: ${CUDA_VERSION} not supported yet"
|
||||
@ -23,7 +26,7 @@ case ${CUDA_VERSION} in
|
||||
esac
|
||||
|
||||
cudnn_installer_name="cudnn_installer.zip"
|
||||
cudnn_installer_link="${windows_s3_link}/${cudnn_file_name}.zip"
|
||||
cudnn_installer_link="https://ossci-windows.s3.amazonaws.com/cudnn-${CUDA_VERSION}-windows${windows_version_qualifier}-x64-${archive_version}.zip"
|
||||
cudnn_install_folder="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/"
|
||||
|
||||
if [[ -f "${cudnn_install_folder}/include/cudnn.h" ]]; then
|
||||
@ -38,11 +41,6 @@ else
|
||||
# Remove all of the directories before attempting to copy files
|
||||
rm -rf "${cudnn_install_folder:?}/*"
|
||||
cp -rf cudnn/cuda/* "${cudnn_install_folder}"
|
||||
|
||||
#Make sure windows path contains zlib dll
|
||||
curl -k -L "${windows_s3_link}/zlib123dllx64.zip" --output "${tmp_dir}\zlib123dllx64.zip"
|
||||
7z x "${tmp_dir}\zlib123dllx64.zip" -o"${tmp_dir}\zlib"
|
||||
xcopy /Y "${tmp_dir}\zlib\dll_x64\*.dll" "C:\Windows\System32"
|
||||
)
|
||||
rm -rf "${tmp_dir}"
|
||||
fi
|
||||
|
||||
@ -62,4 +62,5 @@ binary_windows_params: &binary_windows_params
|
||||
default: "windows-xlarge-cpu-with-nvidia-cuda"
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: << parameters.build_environment >>
|
||||
BUILD_FOR_SYSTEM: windows
|
||||
JOB_EXECUTOR: <<parameters.executor>>
|
||||
|
||||
@ -0,0 +1,14 @@
|
||||
|
||||
promote_common: &promote_common
|
||||
docker:
|
||||
- image: pytorch/release
|
||||
parameters:
|
||||
package_name:
|
||||
description: "package name to promote"
|
||||
type: string
|
||||
default: ""
|
||||
environment:
|
||||
PACKAGE_NAME: << parameters.package_name >>
|
||||
ANACONDA_API_TOKEN: ${CONDA_PYTORCHBOT_TOKEN}
|
||||
AWS_ACCESS_KEY_ID: ${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}
|
||||
AWS_SECRET_ACCESS_KEY: ${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}
|
||||
@ -26,6 +26,24 @@ pytorch_params: &pytorch_params
|
||||
CI_MASTER: << pipeline.parameters.run_master_build >>
|
||||
resource_class: << parameters.resource_class >>
|
||||
|
||||
pytorch_android_params: &pytorch_android_params
|
||||
parameters:
|
||||
build_environment:
|
||||
type: string
|
||||
default: ""
|
||||
op_list:
|
||||
type: string
|
||||
default: ""
|
||||
lite_interpreter:
|
||||
type: string
|
||||
default: "1"
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c"
|
||||
PYTHON_VERSION: "3.6"
|
||||
SELECTED_OP_LIST: << parameters.op_list >>
|
||||
BUILD_LITE_INTERPRETER: << parameters.lite_interpreter >>
|
||||
|
||||
pytorch_ios_params: &pytorch_ios_params
|
||||
parameters:
|
||||
build_environment:
|
||||
|
||||
@ -3,12 +3,12 @@
|
||||
# binary_linux_libtorch_3.6m_cpu_test:
|
||||
# environment:
|
||||
# BUILD_ENVIRONMENT: "libtorch 3.6m cpu"
|
||||
# resource_class: gpu.nvidia.small
|
||||
# resource_class: gpu.medium
|
||||
# <<: *binary_linux_test
|
||||
#
|
||||
# binary_linux_libtorch_3.6m_cu90_test:
|
||||
# environment:
|
||||
# BUILD_ENVIRONMENT: "libtorch 3.6m cu90"
|
||||
# resource_class: gpu.nvidia.small
|
||||
# resource_class: gpu.medium
|
||||
# <<: *binary_linux_test
|
||||
#
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
jobs:
|
||||
binary_linux_build:
|
||||
<<: *binary_linux_build_params
|
||||
steps:
|
||||
@ -162,7 +161,6 @@ jobs:
|
||||
<<: *binary_mac_params
|
||||
macos:
|
||||
xcode: "12.0"
|
||||
resource_class: "large"
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- checkout
|
||||
|
||||
@ -54,3 +54,61 @@
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
set -x
|
||||
cd .circleci/docker && ./build_docker.sh
|
||||
docker_for_ecr_gc_build_job:
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: build_docker_image_for_ecr_gc
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
cd .circleci/ecr_gc_docker
|
||||
docker build . -t 308535385114.dkr.ecr.us-east-1.amazonaws.com/gc/ecr
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
export AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
|
||||
export AWS_REGION=us-east-1
|
||||
aws ecr get-login-password --region $AWS_REGION|docker login --username AWS \
|
||||
--password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
|
||||
set -x
|
||||
docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/gc/ecr
|
||||
ecr_gc_job:
|
||||
parameters:
|
||||
project:
|
||||
type: string
|
||||
default: "pytorch"
|
||||
tags_to_keep: # comma separate values
|
||||
type: string
|
||||
environment:
|
||||
PROJECT: << parameters.project >>
|
||||
# TODO: Remove legacy image tags once we feel comfortable with new docker image tags
|
||||
IMAGE_TAG: << parameters.tags_to_keep >>
|
||||
docker:
|
||||
- image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/gc/ecr
|
||||
aws_auth:
|
||||
aws_access_key_id: ${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
aws_secret_access_key: ${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
# NOTE: see 'docker_build_job' for how these tags actually get built
|
||||
name: dynamically generate tags to keep
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
GENERATED_IMAGE_TAG=$(\
|
||||
git log --oneline --pretty='%H' .circleci/docker \
|
||||
| xargs -I '{}' git rev-parse '{}:.circleci/docker' \
|
||||
| paste -sd "," -)
|
||||
echo "export GENERATED_IMAGE_TAG='${GENERATED_IMAGE_TAG}'" >> ${BASH_ENV}
|
||||
- run:
|
||||
name: garbage collecting for ecr images
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
|
||||
set -x
|
||||
/usr/bin/gc.py --filter-prefix ${PROJECT} --ignore-tags "${IMAGE_TAG},${GENERATED_IMAGE_TAG}"
|
||||
|
||||
@ -5,7 +5,7 @@
|
||||
parameters:
|
||||
branch:
|
||||
type: string
|
||||
default: "main"
|
||||
default: "master"
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
@ -27,7 +27,7 @@
|
||||
pytorch_python_doc_build:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-python-doc-push
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.7-gcc5.4"
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
@ -43,9 +43,9 @@
|
||||
set -ex
|
||||
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}:build-${DOCKER_TAG}-${CIRCLE_SHA1}
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
# turn v1.12.0rc3 into 1.12
|
||||
tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9]*\.[0-9]*\).*/\1/')
|
||||
target=${tag:-main}
|
||||
# turn v1.12.0rc3 into 1.12.0
|
||||
tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9.]*\).*/\1/')
|
||||
target=${tag:-master}
|
||||
echo "building for ${target}"
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
@ -55,7 +55,7 @@
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
mkdir -p ~/workspace/build_artifacts
|
||||
docker cp $id:/var/lib/jenkins/workspace/pytorch.github.io/docs/main ~/workspace/build_artifacts
|
||||
docker cp $id:/var/lib/jenkins/workspace/pytorch.github.io/docs/master ~/workspace/build_artifacts
|
||||
docker cp $id:/var/lib/jenkins/workspace/pytorch.github.io /tmp/workspace
|
||||
|
||||
# Save the docs build so we can debug any problems
|
||||
@ -67,13 +67,13 @@
|
||||
paths:
|
||||
- .
|
||||
- store_artifacts:
|
||||
path: ~/workspace/build_artifacts/main
|
||||
path: ~/workspace/build_artifacts/master
|
||||
destination: docs
|
||||
|
||||
pytorch_cpp_doc_build:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-cpp-doc-push
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.7-gcc5.4"
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
@ -89,14 +89,15 @@
|
||||
set -ex
|
||||
export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}:build-${DOCKER_TAG}-${CIRCLE_SHA1}
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
# turn v1.12.0rc3 into 1.12
|
||||
tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9]*\.[0-9]*\).*/\1/')
|
||||
target=${tag:-main}
|
||||
# turn v1.12.0rc3 into 1.12.0
|
||||
tag=$(echo $CIRCLE_TAG | sed -e 's/v*\([0-9.]*\).*/\1/')
|
||||
tag=${CIRCLE_TAG:1:5}
|
||||
target=${tag:-master}
|
||||
echo "building for ${target}"
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
|
||||
export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && '"export CIRCLE_SHA1='$CIRCLE_SHA1'"' && . ./.circleci/scripts/cpp_doc_push_script.sh docs/"$target" main") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && '"export CIRCLE_SHA1='$CIRCLE_SHA1'"' && . ./.circleci/scripts/cpp_doc_push_script.sh docs/"$target" master") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
@ -212,7 +213,7 @@
|
||||
command: |
|
||||
set -ex
|
||||
source /Users/distiller/workspace/miniconda3/bin/activate
|
||||
python3 -m pip install boto3==1.19.12
|
||||
pip install boto3
|
||||
|
||||
export IN_CI=1
|
||||
export JOB_BASE_NAME=$CIRCLE_JOB
|
||||
@ -252,7 +253,7 @@
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c"
|
||||
PYTHON_VERSION: "3.7"
|
||||
PYTHON_VERSION: "3.6"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
@ -341,7 +342,7 @@
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-publish-snapshot
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c"
|
||||
PYTHON_VERSION: "3.7"
|
||||
PYTHON_VERSION: "3.6"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
@ -377,7 +378,7 @@
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build-only-x86_32
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c"
|
||||
PYTHON_VERSION: "3.7"
|
||||
PYTHON_VERSION: "3.6"
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
@ -415,6 +416,43 @@
|
||||
path: ~/workspace/build_android_x86_32_artifacts/artifacts.tgz
|
||||
destination: artifacts.tgz
|
||||
|
||||
pytorch_android_gradle_custom_build_single:
|
||||
<<: *pytorch_android_params
|
||||
resource_class: large
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_linux_system_environment
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
name: pytorch android gradle custom build single architecture (for PR)
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
# Unlike other gradle jobs, it's not worth building libtorch in a separate CI job and share via docker, because:
|
||||
# 1) Not shareable: it's custom selective build, which is different from default libtorch mobile build;
|
||||
# 2) Not parallelizable by architecture: it only builds libtorch for one architecture;
|
||||
|
||||
echo "DOCKER_IMAGE: ${DOCKER_IMAGE}:${DOCKER_TAG}"
|
||||
time docker pull ${DOCKER_IMAGE}:${DOCKER_TAG} >/dev/null
|
||||
|
||||
git submodule sync && git submodule update -q --init --recursive --depth 1 --jobs 0
|
||||
VOLUME_MOUNTS="-v /home/circleci/project/:/var/lib/jenkins/workspace"
|
||||
export id=$(docker run --env-file "${BASH_ENV}" ${VOLUME_MOUNTS} --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}:${DOCKER_TAG})
|
||||
|
||||
export COMMAND='((echo "export GRADLE_OFFLINE=1" && echo "export BUILD_LITE_INTERPRETER=${BUILD_LITE_INTERPRETER}" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/build_android_gradle.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
# Skip docker push as this job is purely for size analysis purpose.
|
||||
# Result binaries are already in `/home/circleci/project/` as it's mounted instead of copied.
|
||||
|
||||
- upload_binary_size_for_android_build:
|
||||
build_type: custom-build-single
|
||||
|
||||
pytorch_ios_build:
|
||||
<<: *pytorch_ios_params
|
||||
macos:
|
||||
@ -483,7 +521,6 @@
|
||||
echo "IOS_PLATFORM: ${IOS_PLATFORM}"
|
||||
echo "USE_PYTORCH_METAL": "${USE_METAL}"
|
||||
echo "BUILD_LITE_INTERPRETER": "${BUILD_LITE_INTERPRETER}"
|
||||
echo "USE_COREML_DELEGATE": "${USE_COREML_DELEGATE}"
|
||||
|
||||
#check the custom build flag
|
||||
echo "SELECTED_OP_LIST: ${SELECTED_OP_LIST}"
|
||||
@ -492,7 +529,6 @@
|
||||
fi
|
||||
export IOS_ARCH=${IOS_ARCH}
|
||||
export IOS_PLATFORM=${IOS_PLATFORM}
|
||||
export USE_COREML_DELEGATE=${USE_COREML_DELEGATE}
|
||||
if [ ${IOS_PLATFORM} != "SIMULATOR" ]; then
|
||||
export USE_PYTORCH_METAL=${USE_METAL}
|
||||
fi
|
||||
@ -532,32 +568,20 @@
|
||||
PROJ_ROOT=/Users/distiller/project
|
||||
source ~/anaconda/bin/activate
|
||||
# use the pytorch nightly build to generate models
|
||||
pip3 install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
|
||||
conda install pytorch torchvision -c pytorch-nightly --yes
|
||||
# generate models for differnet backends
|
||||
cd ${PROJ_ROOT}/ios/TestApp/benchmark
|
||||
mkdir -p ../models
|
||||
if [ ${USE_COREML_DELEGATE} == 1 ]; then
|
||||
pip install coremltools==5.0b5
|
||||
pip install six
|
||||
python coreml_backend.py
|
||||
else
|
||||
python trace_model.py
|
||||
fi
|
||||
python trace_model.py
|
||||
if [ ${BUILD_LITE_INTERPRETER} == 1 ]; then
|
||||
echo "Setting up the TestApp for LiteInterpreter"
|
||||
ruby setup.rb --lite 1
|
||||
else
|
||||
echo "Setting up the TestApp for Full JIT"
|
||||
ruby setup.rb
|
||||
fi
|
||||
cd ${PROJ_ROOT}/ios/TestApp
|
||||
instruments -s -devices
|
||||
if [ ${BUILD_LITE_INTERPRETER} == 1 ]; then
|
||||
if [ ${USE_COREML_DELEGATE} == 1 ]; then
|
||||
fastlane scan --only_testing TestAppTests/TestAppTests/testCoreML
|
||||
else
|
||||
fastlane scan --only_testing TestAppTests/TestAppTests/testLiteInterpreter
|
||||
fi
|
||||
fastlane scan --only_testing TestAppTests/TestAppTests/testLiteInterpreter
|
||||
else
|
||||
fastlane scan --only_testing TestAppTests/TestAppTests/testFullJIT
|
||||
fi
|
||||
@ -580,7 +604,7 @@
|
||||
time docker pull ${DOCKER_IMAGE}:${DOCKER_TAG} >/dev/null
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}:${DOCKER_TAG})
|
||||
|
||||
echo "Do NOT merge main branch into $CIRCLE_BRANCH in environment $BUILD_ENVIRONMENT"
|
||||
echo "Do NOT merge master branch into $CIRCLE_BRANCH in environment $BUILD_ENVIRONMENT"
|
||||
|
||||
git submodule sync && git submodule update -q --init --recursive --depth 1 --jobs 0
|
||||
|
||||
@ -659,7 +683,7 @@
|
||||
pytorch_doc_test:
|
||||
environment:
|
||||
BUILD_ENVIRONMENT: pytorch-doc-test
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.7-gcc5.4"
|
||||
DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4"
|
||||
resource_class: medium
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
|
||||
400
.circleci/verbatim-sources/job-specs/pytorch-job-specs.yml
Normal file
400
.circleci/verbatim-sources/job-specs/pytorch-job-specs.yml
Normal file
@ -0,0 +1,400 @@
|
||||
jobs:
|
||||
pytorch_linux_build:
|
||||
<<: *pytorch_params
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_linux_system_environment
|
||||
- optional_merge_target_branch
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "1h"
|
||||
command: |
|
||||
set -e
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"pure_torch"* ]]; then
|
||||
echo 'BUILD_CAFFE2=OFF' >> "${BASH_ENV}"
|
||||
fi
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
echo 'ATEN_THREADING=TBB' >> "${BASH_ENV}"
|
||||
echo 'USE_TBB=1' >> "${BASH_ENV}"
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
echo 'ATEN_THREADING=NATIVE' >> "${BASH_ENV}"
|
||||
fi
|
||||
echo "Parallel backend flags: "${PARALLEL_FLAGS}
|
||||
# Pull Docker image and run build
|
||||
echo "DOCKER_IMAGE: "${DOCKER_IMAGE}:${DOCKER_TAG}
|
||||
time docker pull ${DOCKER_IMAGE}:${DOCKER_TAG} >/dev/null
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}:${DOCKER_TAG})
|
||||
|
||||
git submodule sync && git submodule update -q --init --recursive --depth 1 --jobs 0
|
||||
|
||||
docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace
|
||||
|
||||
export COMMAND='((echo "sudo chown -R jenkins workspace && export JOB_BASE_NAME="$CIRCLE_JOB" && cd workspace && .jenkins/pytorch/build.sh && find ${BUILD_ROOT} -type f -name "*.a" -or -name "*.o" -delete") | docker exec -u jenkins -i "$id" bash) 2>&1'
|
||||
|
||||
echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
|
||||
|
||||
# Copy dist folder back
|
||||
docker cp $id:/var/lib/jenkins/workspace/dist /home/circleci/project/. || echo "Dist folder not found"
|
||||
|
||||
# Push intermediate Docker image for next phase to use
|
||||
if [ -z "${BUILD_ONLY}" ]; then
|
||||
# Note [Special build images]
|
||||
# The xla build uses the same docker image as
|
||||
# pytorch_linux_bionic_py3_6_clang9_build. In the push step, we have to
|
||||
# distinguish between them so the test can pick up the correct image.
|
||||
output_image=${DOCKER_IMAGE}:build-${DOCKER_TAG}-${CIRCLE_SHA1}
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-xla
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"libtorch"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-libtorch
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-paralleltbb
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-parallelnative
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-x86_64"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-x86_64
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-arm-v7a"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-arm-v7a
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-arm-v8a"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-arm-v8a
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-x86_32"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-x86_32
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"android-ndk-r19c-vulkan-x86_32"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-android-vulkan-x86_32
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"vulkan-linux"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-vulkan
|
||||
else
|
||||
export COMMIT_DOCKER_IMAGE=$output_image
|
||||
fi
|
||||
docker commit "$id" ${COMMIT_DOCKER_IMAGE}
|
||||
time docker push ${COMMIT_DOCKER_IMAGE}
|
||||
fi
|
||||
- run:
|
||||
name: upload build & binary data
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
cd /pytorch && export COMMIT_TIME=$(git log --max-count=1 --format=%ct || echo 0)
|
||||
python3 -mpip install requests && \
|
||||
SCRIBE_GRAPHQL_ACCESS_TOKEN=${SCRIBE_GRAPHQL_ACCESS_TOKEN} \
|
||||
python3 -m tools.stats.upload_binary_size_to_scuba || exit 0
|
||||
- store_artifacts:
|
||||
path: /home/circleci/project/dist
|
||||
|
||||
pytorch_linux_test:
|
||||
<<: *pytorch_params
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
steps:
|
||||
# See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
|
||||
- checkout
|
||||
- calculate_docker_image_tag
|
||||
- setup_linux_system_environment
|
||||
- setup_ci_environment
|
||||
- run:
|
||||
name: Download Docker image
|
||||
no_output_timeout: "90m"
|
||||
command: |
|
||||
set -e
|
||||
export PYTHONUNBUFFERED=1
|
||||
if [[ "${DOCKER_IMAGE}" == *rocm3.9* ]]; then
|
||||
export DOCKER_TAG="f3d89a32912f62815e4feaeed47e564e887dffd6"
|
||||
fi
|
||||
# See Note [Special build images]
|
||||
output_image=${DOCKER_IMAGE}:build-${DOCKER_TAG}-${CIRCLE_SHA1}
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"xla"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-xla
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"libtorch"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-libtorch
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-paralleltbb
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-parallelnative
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"vulkan-linux"* ]]; then
|
||||
export COMMIT_DOCKER_IMAGE=$output_image-vulkan
|
||||
else
|
||||
export COMMIT_DOCKER_IMAGE=$output_image
|
||||
fi
|
||||
echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
|
||||
echo 'ATEN_THREADING=TBB' >> "${BASH_ENV}"
|
||||
echo 'USE_TBB=1' >> "${BASH_ENV}"
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
|
||||
echo 'ATEN_THREADING=NATIVE' >> "${BASH_ENV}"
|
||||
fi
|
||||
echo "Parallel backend flags: "${PARALLEL_FLAGS}
|
||||
|
||||
time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
|
||||
|
||||
# TODO: Make this less painful
|
||||
if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --gpus all --shm-size=2g -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *"rocm"* ]]; then
|
||||
hostname
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --shm-size=8g --ipc=host --device /dev/kfd --device /dev/dri --group-add video -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
else
|
||||
export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --shm-size=1g --ipc=host -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
|
||||
fi
|
||||
echo "id=${id}" >> "${BASH_ENV}"
|
||||
|
||||
- run:
|
||||
name: Check for no AVX instruction by default
|
||||
no_output_timeout: "20m"
|
||||
command: |
|
||||
set -e
|
||||
is_vanilla_build() {
|
||||
if [ "${BUILD_ENVIRONMENT}" == "pytorch-linux-bionic-py3.6-clang9-test" ]; then
|
||||
return 0
|
||||
fi
|
||||
if [ "${BUILD_ENVIRONMENT}" == "pytorch-linux-xenial-py3.6-gcc5.4-test" ]; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
if is_vanilla_build; then
|
||||
echo "apt-get update || apt-get install libgnutls30" | docker exec -u root -i "$id" bash
|
||||
echo "apt-get install -y qemu-user gdb" | docker exec -u root -i "$id" bash
|
||||
echo "cd workspace/build; qemu-x86_64 -g 2345 -cpu Broadwell -E ATEN_CPU_CAPABILITY=default ./bin/basic --gtest_filter=BasicTest.BasicTestCPU & gdb ./bin/basic -ex 'set pagination off' -ex 'target remote :2345' -ex 'continue' -ex 'bt' -ex='set confirm off' -ex 'quit \$_isvoid(\$_exitcode)'" | docker exec -u jenkins -i "$id" bash
|
||||
else
|
||||
echo "Skipping for ${BUILD_ENVIRONMENT}"
|
||||
fi
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "90m"
|
||||
command: |
|
||||
set -e
|
||||
|
||||
cat >docker_commands.sh \<<EOL
|
||||
# =================== The following code will be executed inside Docker container ===================
|
||||
set -ex
|
||||
export SCRIBE_GRAPHQL_ACCESS_TOKEN="${SCRIBE_GRAPHQL_ACCESS_TOKEN}"
|
||||
export JOB_BASE_NAME="$CIRCLE_JOB"
|
||||
# temporary fix for https://github.com/pytorch/pytorch/issues/60746
|
||||
if [ -z "$CIRCLE_PR_NUMBER" ]; then
|
||||
if [[ $CIRCLE_BRANCH =~ .*pull.* ]]; then
|
||||
export PR_NUMBER="$(echo $CIRCLE_BRANCH | sed 's/[^0-9]//g')"
|
||||
export CIRCLE_PR_NUMBER="$PR_NUMBER"
|
||||
fi
|
||||
else
|
||||
export PR_NUMBER="$CIRCLE_PR_NUMBER"
|
||||
fi
|
||||
${PARALLEL_FLAGS}
|
||||
cd workspace
|
||||
EOL
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"multigpu"* ]]; then
|
||||
echo ".jenkins/pytorch/multigpu-test.sh" >> docker_commands.sh
|
||||
elif [[ ${BUILD_ENVIRONMENT} == *onnx* ]]; then
|
||||
echo "pip install click mock tabulate networkx==2.0" >> docker_commands.sh
|
||||
echo "pip -q install --user \"file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx\"" >> docker_commands.sh
|
||||
echo ".jenkins/caffe2/test.sh" >> docker_commands.sh
|
||||
else
|
||||
echo ".jenkins/pytorch/test.sh" >> docker_commands.sh
|
||||
fi
|
||||
echo "(cat docker_commands.sh | docker exec -u jenkins -i "$id" bash) 2>&1" > command.sh
|
||||
unbuffer bash command.sh | ts
|
||||
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"coverage"* ]]; then
|
||||
echo "Retrieving C++ coverage report"
|
||||
docker cp $id:/var/lib/jenkins/workspace/build/coverage.info ./test
|
||||
fi
|
||||
if [[ ${BUILD_ENVIRONMENT} == *"coverage"* || ${BUILD_ENVIRONMENT} == *"onnx"* ]]; then
|
||||
echo "Retrieving Python coverage report"
|
||||
docker cp $id:/var/lib/jenkins/workspace/test/.coverage ./test
|
||||
docker cp $id:/var/lib/jenkins/workspace/test/coverage.xml ./test
|
||||
python3 -mpip install codecov
|
||||
python3 -mcodecov
|
||||
fi
|
||||
- run:
|
||||
name: Report results
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
set -e
|
||||
# Retrieving test results should be done as very first step as command never fails
|
||||
# But is always executed if previous step fails for some reason
|
||||
echo "Retrieving test reports"
|
||||
docker cp $id:/var/lib/jenkins/workspace/test/test-reports ./ || echo 'No test reports found!'
|
||||
docker stats --all --no-stream
|
||||
|
||||
cat >docker_commands.sh \<<EOL
|
||||
# =================== The following code will be executed inside Docker container ===================
|
||||
set -ex
|
||||
export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}
|
||||
export SCRIBE_GRAPHQL_ACCESS_TOKEN="${SCRIBE_GRAPHQL_ACCESS_TOKEN}"
|
||||
export CIRCLE_TAG="${CIRCLE_TAG:-}"
|
||||
export CIRCLE_SHA1="$CIRCLE_SHA1"
|
||||
export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}"
|
||||
export CIRCLE_BRANCH="$CIRCLE_BRANCH"
|
||||
export JOB_BASE_NAME="$CIRCLE_JOB"
|
||||
export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
|
||||
cd workspace
|
||||
python -m tools.stats.print_test_stats --upload-to-s3 --compare-with-s3 test
|
||||
EOL
|
||||
echo "(cat docker_commands.sh | docker exec -u jenkins -e LANG=C.UTF-8 -i "$id" bash) 2>&1" > command.sh
|
||||
unbuffer bash command.sh | ts
|
||||
when: always
|
||||
- store_test_results:
|
||||
path: test-reports
|
||||
- store_artifacts:
|
||||
path: test/.coverage
|
||||
- store_artifacts:
|
||||
path: test/coverage.xml
|
||||
|
||||
pytorch_windows_build:
|
||||
<<: *pytorch_windows_params
|
||||
parameters:
|
||||
executor:
|
||||
type: string
|
||||
default: "windows-xlarge-cpu-with-nvidia-cuda"
|
||||
build_environment:
|
||||
type: string
|
||||
default: ""
|
||||
test_name:
|
||||
type: string
|
||||
default: ""
|
||||
cuda_version:
|
||||
type: string
|
||||
default: "10.1"
|
||||
python_version:
|
||||
type: string
|
||||
default: "3.8"
|
||||
vs_version:
|
||||
type: string
|
||||
default: "16.8.6"
|
||||
vc_version:
|
||||
type: string
|
||||
default: "14.16"
|
||||
vc_year:
|
||||
type: string
|
||||
default: "2019"
|
||||
vc_product:
|
||||
type: string
|
||||
default: "BuildTools"
|
||||
use_cuda:
|
||||
type: string
|
||||
default: ""
|
||||
executor: <<parameters.executor>>
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Install VS2019 toolchain
|
||||
no_output_timeout: 10m
|
||||
command: |
|
||||
powershell .circleci/scripts/vs_install.ps1
|
||||
- run:
|
||||
name: Install Cuda
|
||||
no_output_timeout: 30m
|
||||
command: |
|
||||
if [[ "${USE_CUDA}" == "1" ]]; then
|
||||
.circleci/scripts/windows_cuda_install.sh
|
||||
fi
|
||||
- run:
|
||||
name: Install Cudnn
|
||||
command : |
|
||||
if [[ "${USE_CUDA}" == "1" ]]; then
|
||||
.circleci/scripts/windows_cudnn_install.sh
|
||||
fi
|
||||
- run:
|
||||
name: Build
|
||||
no_output_timeout: "90m"
|
||||
command: |
|
||||
set -e
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_WIN_BUILD_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
|
||||
set -x
|
||||
.jenkins/pytorch/win-build.sh
|
||||
- persist_to_workspace:
|
||||
root: "C:/w"
|
||||
paths: build-results
|
||||
- store_artifacts:
|
||||
path: C:/w/build-results
|
||||
|
||||
pytorch_windows_test:
|
||||
<<: *pytorch_windows_params
|
||||
parameters:
|
||||
executor:
|
||||
type: string
|
||||
default: "windows-medium-cpu-with-nvidia-cuda"
|
||||
build_environment:
|
||||
type: string
|
||||
default: ""
|
||||
test_name:
|
||||
type: string
|
||||
default: ""
|
||||
cuda_version:
|
||||
type: string
|
||||
default: "10.1"
|
||||
python_version:
|
||||
type: string
|
||||
default: "3.8"
|
||||
vs_version:
|
||||
type: string
|
||||
default: "16.8.6"
|
||||
vc_version:
|
||||
type: string
|
||||
default: "14.16"
|
||||
vc_year:
|
||||
type: string
|
||||
default: "2019"
|
||||
vc_product:
|
||||
type: string
|
||||
default: "BuildTools"
|
||||
use_cuda:
|
||||
type: string
|
||||
default: ""
|
||||
executor: <<parameters.executor>>
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: c:/users/circleci/workspace
|
||||
- run:
|
||||
name: Install VS2019 toolchain
|
||||
no_output_timeout: 10m
|
||||
command: |
|
||||
powershell .circleci/scripts/vs_install.ps1
|
||||
- run:
|
||||
name: Install Cuda
|
||||
no_output_timeout: 30m
|
||||
command: |
|
||||
if [[ "${CUDA_VERSION}" != "cpu" ]]; then
|
||||
if [[ "${CUDA_VERSION}" != "10" || "${JOB_EXECUTOR}" != "windows-with-nvidia-gpu" ]]; then
|
||||
.circleci/scripts/windows_cuda_install.sh
|
||||
fi
|
||||
fi
|
||||
- run:
|
||||
name: Install Cudnn
|
||||
command : |
|
||||
if [[ "${CUDA_VERSION}" != "cpu" ]]; then
|
||||
.circleci/scripts/windows_cudnn_install.sh
|
||||
fi
|
||||
- run:
|
||||
name: Test
|
||||
no_output_timeout: "30m"
|
||||
command: |
|
||||
set -e
|
||||
export IN_CI=1
|
||||
set +x
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_WIN_BUILD_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
|
||||
set -x
|
||||
.jenkins/pytorch/win-test.sh
|
||||
- run:
|
||||
name: Report results
|
||||
no_output_timeout: "5m"
|
||||
command: |
|
||||
set -ex
|
||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_WIN_BUILD_V1}
|
||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
|
||||
pip install typing_extensions boto3
|
||||
python -m tools.stats.print_test_stats --upload-to-s3 --compare-with-s3 test
|
||||
when: always
|
||||
- store_test_results:
|
||||
path: test/test-reports
|
||||
- store_artifacts:
|
||||
path: test/coverage.xml
|
||||
@ -26,7 +26,6 @@
|
||||
# (smoke tests and upload jobs do not need the pytorch repo).
|
||||
binary_checkout: &binary_checkout
|
||||
name: Checkout pytorch/builder repo
|
||||
no_output_timeout: "30m"
|
||||
command: .circleci/scripts/binary_checkout.sh
|
||||
|
||||
# Parses circleci arguments in a consistent way, essentially routing to the
|
||||
|
||||
34
.circleci/verbatim-sources/workflows/workflows-ecr-gc.yml
Normal file
34
.circleci/verbatim-sources/workflows/workflows-ecr-gc.yml
Normal file
@ -0,0 +1,34 @@
|
||||
ecr_gc:
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "45 * * * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
jobs:
|
||||
- docker_for_ecr_gc_build_job
|
||||
- ecr_gc_job:
|
||||
name: ecr_gc_job_for_pytorch
|
||||
project: pytorch
|
||||
tags_to_keep: "271,262,256,278,282,291,300,323,327,347,389,401,402,403,405,a8006f9a-272d-4478-b137-d121c6f05c83,6e7b11da-a919-49e5-b2ba-da66e3d4bb0a,f990c76a-a798-42bb-852f-5be5006f8026,e43973a9-9d5a-4138-9181-a08a0fc55e2f,8fcf46ef-4a34-480b-a8ee-b0a30a4d3e59,9a3986fa-7ce7-4a36-a001-3c9bef9892e2,1bc00f11-e0f3-4e5c-859f-15937dd938cd,209062ef-ab58-422a-b295-36c4eed6e906,be76e8fd-44e2-484d-b090-07e0cc3a56f0,fff7795428560442086f7b2bb6004b65245dc11a,ab1632df-fa59-40e6-8c23-98e004f61148"
|
||||
requires:
|
||||
- docker_for_ecr_gc_build_job
|
||||
- ecr_gc_job:
|
||||
name: ecr_gc_job_for_caffe2
|
||||
project: caffe2
|
||||
tags_to_keep: "376,373,369,348,345,336,325,324,315,306,301,287,283,276,273,266,253,248,238,230,213"
|
||||
requires:
|
||||
- docker_for_ecr_gc_build_job
|
||||
- ecr_gc_job:
|
||||
name: ecr_gc_job_for_translate
|
||||
project: translate
|
||||
tags_to_keep: "8"
|
||||
requires:
|
||||
- docker_for_ecr_gc_build_job
|
||||
- ecr_gc_job:
|
||||
name: ecr_gc_job_for_tensorcomp
|
||||
project: tensorcomp
|
||||
tags_to_keep: "34"
|
||||
requires:
|
||||
- docker_for_ecr_gc_build_job
|
||||
46
.circleci/verbatim-sources/workflows/workflows-promote.yml
Normal file
46
.circleci/verbatim-sources/workflows/workflows-promote.yml
Normal file
@ -0,0 +1,46 @@
|
||||
# Promotion workflow
|
||||
promote:
|
||||
jobs:
|
||||
# Requires manual approval by someone in org-member
|
||||
# CircleCI security context
|
||||
- promote_approval:
|
||||
context: org-member
|
||||
filters:
|
||||
branches:
|
||||
ignore: /.*/
|
||||
tags:
|
||||
only: /v[0-9]+(\.[0-9]+)*/
|
||||
type: approval
|
||||
- promote_s3:
|
||||
context: org-member
|
||||
filters:
|
||||
branches:
|
||||
ignore: /.*/
|
||||
tags:
|
||||
only: /v[0-9]+(\.[0-9]+)*/
|
||||
name: promote_s3_libtorch
|
||||
package_name: libtorch
|
||||
requires:
|
||||
- promote_approval
|
||||
- promote_s3:
|
||||
context: org-member
|
||||
filters:
|
||||
branches:
|
||||
ignore: /.*/
|
||||
tags:
|
||||
only: /v[0-9]+(\.[0-9]+)*/
|
||||
name: promote_s3_torch
|
||||
package_name: torch
|
||||
requires:
|
||||
- promote_approval
|
||||
- promote_conda:
|
||||
context: org-member
|
||||
filters:
|
||||
branches:
|
||||
ignore: /.*/
|
||||
tags:
|
||||
only: /v[0-9]+(\.[0-9]+)*/
|
||||
name: promote_conda_pytorch
|
||||
package_name: pytorch
|
||||
requires:
|
||||
- promote_approval
|
||||
@ -0,0 +1,37 @@
|
||||
# the following clones pytorch_linux_xenial_cuda10_2_cudnn7_py3_gcc7's tests but enables
|
||||
# slow tests and sets an environment variable so gradcheck runs with fast_mode=False
|
||||
slow-gradcheck-scheduled-ci:
|
||||
triggers:
|
||||
- schedule:
|
||||
# runs every 8 hours on the 45th minute
|
||||
cron: "45 0,8,16 * * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
jobs:
|
||||
- docker_build_job:
|
||||
name: "docker-pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
|
||||
image_name: "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
|
||||
- pytorch_linux_build:
|
||||
name: periodic_pytorch_xenial_cuda10_2_cudnn7_gcc7_build
|
||||
requires:
|
||||
- "docker-pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
|
||||
build_environment: "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7-build"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
|
||||
- pytorch_linux_test:
|
||||
name: periodic_pytorch_xenial_cuda10_2_cudnn7_gcc7_old_gradcheck_test1
|
||||
requires:
|
||||
- periodic_pytorch_xenial_cuda10_2_cudnn7_gcc7_build
|
||||
build_environment: "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7-old-gradcheck-test1"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
|
||||
use_cuda_docker_runtime: "1"
|
||||
resource_class: gpu.medium
|
||||
- pytorch_linux_test:
|
||||
name: periodic_pytorch_xenial_cuda10_2_cudnn7_gcc7_old_gradcheck_test2
|
||||
requires:
|
||||
- periodic_pytorch_xenial_cuda10_2_cudnn7_gcc7_build
|
||||
build_environment: "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7-old-gradcheck-test2"
|
||||
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
|
||||
use_cuda_docker_runtime: "1"
|
||||
resource_class: gpu.medium
|
||||
@ -33,12 +33,11 @@ modernize-*,
|
||||
-modernize-use-default-member-init,
|
||||
-modernize-use-using,
|
||||
-modernize-use-trailing-return-type,
|
||||
-modernize-use-nodiscard,
|
||||
performance-*,
|
||||
-performance-noexcept-move-constructor,
|
||||
-performance-unnecessary-value-param,
|
||||
'
|
||||
HeaderFilterRegex: 'torch/csrc/(?!deploy/interpreter/cpython).*'
|
||||
HeaderFilterRegex: 'torch/csrc/.*'
|
||||
AnalyzeTemporaryDtors: false
|
||||
WarningsAsErrors: '*'
|
||||
CheckOptions:
|
||||
|
||||
1
.flake8
1
.flake8
@ -16,6 +16,7 @@ per-file-ignores = __init__.py: F401 torch/utils/cpp_extension.py: B950
|
||||
optional-ascii-coding = True
|
||||
exclude =
|
||||
./.git,
|
||||
./build_code_analyzer,
|
||||
./build_test_custom_build,
|
||||
./build,
|
||||
./caffe2,
|
||||
|
||||
@ -1,24 +0,0 @@
|
||||
# 2020-11-12 Enabled ShellCheck on `.jenkins/pytorch`
|
||||
65d5004b09fd8d5deac173a3aaa259f46eaa0d67
|
||||
# 2021-01-20 Replaced ` ` with `...` in many doctests
|
||||
c147aa306c6386a753fdff24b48d04e803070a63
|
||||
# 2021-03-05 Removed all trailing whitespace
|
||||
8c798e062216278673a75bac0848ea69a8bd3f03
|
||||
# 2021-03-30 Normalized trailing newlines
|
||||
5bcbbf537327f6e8328289c25a3a453a2444d984
|
||||
# 2021-03-31 Autogenerated Markdown ToCs
|
||||
a74b10def961ab090385f291ee06e66db99c1a2f
|
||||
# 2021-04-02 Enabled more ShellCheck warnings
|
||||
09670c7d43b9abce862a6bf71d8cc89e64764bdb
|
||||
# 2021-04-08 Removed all non-breaking spaces
|
||||
cc11aaaa60aadf28e3ec278bce26a42c1cd68a4f
|
||||
# 2021-04-13 Expanded many wildcard imports
|
||||
4753100a3baa96273204c361c8452afb7b59836f
|
||||
# 2021-04-19 Removed all unqualified `noqa`
|
||||
e3900d2ba5c9f91a24a9ce34520794c8366d5c54
|
||||
# 2021-04-21 Removed all unqualified `type: ignore`
|
||||
75024e228ca441290b6a1c2e564300ad507d7af6
|
||||
# 2021-05-14 Removed all versionless Python shebangs
|
||||
2e26976ad3b06ce95dd6afccfdbe124802edf28f
|
||||
# 2021-06-07 Strictly typed everything in `.github` and `tools`
|
||||
737d920b21db9b4292d056ee1329945990656304
|
||||
2
.gitattributes
vendored
2
.gitattributes
vendored
@ -2,5 +2,3 @@
|
||||
.circleci/config.yml linguist-generated=true
|
||||
.github/workflows/generated-*.yml linguist-generated=true
|
||||
.github/generated-* linguist-generated=true
|
||||
.github/scripts/gql_mocks.json linguist-generated=true
|
||||
third_party/LICENSES_BUNDLED.txt linguist-generated=true
|
||||
|
||||
49
.github/ISSUE_TEMPLATE/bug-report.md
vendored
Normal file
49
.github/ISSUE_TEMPLATE/bug-report.md
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
---
|
||||
name: "\U0001F41B Bug Report"
|
||||
about: Submit a bug report to help us improve PyTorch
|
||||
|
||||
---
|
||||
|
||||
## 🐛 Bug
|
||||
|
||||
<!-- A clear and concise description of what the bug is. -->
|
||||
|
||||
## To Reproduce
|
||||
|
||||
Steps to reproduce the behavior:
|
||||
|
||||
1.
|
||||
1.
|
||||
1.
|
||||
|
||||
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
|
||||
|
||||
## Expected behavior
|
||||
|
||||
<!-- A clear and concise description of what you expected to happen. -->
|
||||
|
||||
## Environment
|
||||
|
||||
Please copy and paste the output from our
|
||||
[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py)
|
||||
(or fill out the checklist below manually).
|
||||
|
||||
You can get the script and run it with:
|
||||
```
|
||||
wget https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py
|
||||
# For security purposes, please check the contents of collect_env.py before running it.
|
||||
python collect_env.py
|
||||
```
|
||||
|
||||
- PyTorch Version (e.g., 1.0):
|
||||
- OS (e.g., Linux):
|
||||
- How you installed PyTorch (`conda`, `pip`, source):
|
||||
- Build command you used (if compiling from source):
|
||||
- Python version:
|
||||
- CUDA/cuDNN version:
|
||||
- GPU models and configuration:
|
||||
- Any other relevant information:
|
||||
|
||||
## Additional context
|
||||
|
||||
<!-- Add any other context about the problem here. -->
|
||||
56
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
56
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@ -1,56 +0,0 @@
|
||||
name: 🐛 Bug Report
|
||||
description: Create a report to help us reproduce and fix the bug
|
||||
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
#### Before submitting a bug, please make sure the issue hasn't been already addressed by searching through [the existing and past issues](https://github.com/pytorch/pytorch/issues?q=is%3Aissue+sort%3Acreated-desc+).
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: 🐛 Describe the bug
|
||||
description: |
|
||||
Please provide a clear and concise description of what the bug is.
|
||||
|
||||
If relevant, add a minimal example so that we can reproduce the error by running the code. It is very important for the snippet to be as succinct (minimal) as possible, so please take time to trim down any irrelevant code to help us debug efficiently. We are going to copy-paste your code and we expect to get the same result as you did: avoid any external data, and include the relevant imports, etc. For example:
|
||||
|
||||
```python
|
||||
# All necessary imports at the beginning
|
||||
import torch
|
||||
|
||||
# A succinct reproducing example trimmed down to the essential parts:
|
||||
t = torch.rand(5, 10) # Note: the bug is here, we should pass requires_grad=True
|
||||
t.sum().backward()
|
||||
```
|
||||
|
||||
If the code is too long (hopefully, it isn't), feel free to put it in a public gist and link it in the issue: https://gist.github.com.
|
||||
|
||||
Please also paste or describe the results you observe instead of the expected results. If you observe an error, please paste the error message including the **full** traceback of the exception. It may be relevant to wrap error messages in ```` ```triple quotes blocks``` ````.
|
||||
placeholder: |
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
```python
|
||||
# Sample code to reproduce the problem
|
||||
```
|
||||
|
||||
```
|
||||
The error message you got, with the full traceback.
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Versions
|
||||
description: |
|
||||
Please run the following and paste the output below.
|
||||
```sh
|
||||
wget https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py
|
||||
# For security purposes, please check the contents of collect_env.py before running it.
|
||||
python collect_env.py
|
||||
```
|
||||
validations:
|
||||
required: true
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
||||
39
.github/ISSUE_TEMPLATE/ci-sev.md
vendored
39
.github/ISSUE_TEMPLATE/ci-sev.md
vendored
@ -1,39 +0,0 @@
|
||||
---
|
||||
name: "⚠️ CI SEV"
|
||||
about: Tracking incidents for PyTorch's CI infra.
|
||||
---
|
||||
|
||||
> NOTE: Remember to label this issue with "`ci: sev`"
|
||||
|
||||
## Current Status
|
||||
*Status could be: preemptive, ongoing, mitigated, closed. Also tell people if they need to take action to fix it (i.e. rebase)*.
|
||||
|
||||
## Error looks like
|
||||
*Provide some way users can tell that this SEV is causing their issue.*
|
||||
|
||||
## Incident timeline (all times pacific)
|
||||
*Include when the incident began, when it was detected, mitigated, root caused, and finally closed.*
|
||||
|
||||
<details>
|
||||
<summary> Click for example </summary>
|
||||
|
||||
e.g.
|
||||
- 10/30 7:27a incident began
|
||||
- 10/30 8:30a detected by <method>
|
||||
- 10/30 9:00 pm root caused as…
|
||||
- 10/30 9:10 pm mitigated by…
|
||||
- 10/31 10: am closed by…
|
||||
|
||||
</details>
|
||||
|
||||
## User impact
|
||||
*How does this affect users of PyTorch CI?*
|
||||
|
||||
## Root cause
|
||||
*What was the root cause of this issue?*
|
||||
|
||||
## Mitigation
|
||||
*How did we mitigate the issue?*
|
||||
|
||||
## Prevention/followups
|
||||
*How do we prevent issues like this in the future?*
|
||||
5
.github/ISSUE_TEMPLATE/config.yml
vendored
5
.github/ISSUE_TEMPLATE/config.yml
vendored
@ -1,5 +0,0 @@
|
||||
blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: Questions
|
||||
url: https://discuss.pytorch.org/
|
||||
about: Ask questions and discuss with other PyTorch community members
|
||||
9
.github/ISSUE_TEMPLATE/documentation.md
vendored
Normal file
9
.github/ISSUE_TEMPLATE/documentation.md
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
name: "\U0001F4DA Documentation"
|
||||
about: Report an issue related to https://pytorch.org/docs
|
||||
|
||||
---
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
<!-- A clear and concise description of what content in https://pytorch.org/docs is an issue. If this has to do with the general https://pytorch.org website, please file an issue at https://github.com/pytorch/pytorch.github.io/issues/new/choose instead. If this has to do with https://pytorch.org/tutorials, please file an issue at https://github.com/pytorch/tutorials/issues/new -->
|
||||
20
.github/ISSUE_TEMPLATE/documentation.yml
vendored
20
.github/ISSUE_TEMPLATE/documentation.yml
vendored
@ -1,20 +0,0 @@
|
||||
name: 📚 Documentation
|
||||
description: Report an issue related to https://pytorch.org/docs/stable/index.html
|
||||
|
||||
body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: 📚 The doc issue
|
||||
description: >
|
||||
A clear and concise description of what content in https://pytorch.org/docs/stable/index.html is an issue. If this has to do with the general https://pytorch.org website, please file an issue at https://github.com/pytorch/pytorch.github.io/issues/new/choose instead. If this has to do with https://pytorch.org/tutorials, please file an issue at https://github.com/pytorch/tutorials/issues/new.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Suggest a potential alternative/fix
|
||||
description: >
|
||||
Tell us how we could improve the documentation in this regard.
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
||||
24
.github/ISSUE_TEMPLATE/feature-request.md
vendored
Normal file
24
.github/ISSUE_TEMPLATE/feature-request.md
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
---
|
||||
name: "\U0001F680 Feature Request"
|
||||
about: Submit a proposal/request for a new PyTorch feature
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Feature
|
||||
<!-- A clear and concise description of the feature proposal -->
|
||||
|
||||
## Motivation
|
||||
|
||||
<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->
|
||||
|
||||
## Pitch
|
||||
|
||||
<!-- A clear and concise description of what you want to happen. -->
|
||||
|
||||
## Alternatives
|
||||
|
||||
<!-- A clear and concise description of any alternative solutions or features you've considered, if any. -->
|
||||
|
||||
## Additional context
|
||||
|
||||
<!-- Add any other context or screenshots about the feature request here. -->
|
||||
25
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
25
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
@ -1,25 +0,0 @@
|
||||
name: 🚀 Feature request
|
||||
description: Submit a proposal/request for a new PyTorch feature
|
||||
|
||||
body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: 🚀 The feature, motivation and pitch
|
||||
description: >
|
||||
A clear and concise description of the feature proposal. Please outline the motivation for the proposal. Is your feature request related to a specific problem? e.g., *"I'm working on X and would like Y to be possible"*. If this is related to another GitHub issue, please link here too.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Alternatives
|
||||
description: >
|
||||
A description of any alternative solutions or features you've considered, if any.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional context
|
||||
description: >
|
||||
Add any other context or screenshots about the feature request.
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: >
|
||||
Thanks for contributing 🎉!
|
||||
13
.github/ISSUE_TEMPLATE/questions-help-support.md
vendored
Normal file
13
.github/ISSUE_TEMPLATE/questions-help-support.md
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
---
|
||||
name: "❓Questions/Help/Support"
|
||||
about: Do you need support? We have resources.
|
||||
|
||||
---
|
||||
|
||||
## ❓ Questions and Help
|
||||
|
||||
### Please note that this issue tracker is not a help form and this issue will be closed.
|
||||
|
||||
We have a set of [listed resources available on the website](https://pytorch.org/resources). Our primary means of support is our discussion forum:
|
||||
|
||||
- [Discussion Forum](https://discuss.pytorch.org/)
|
||||
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
2
.github/PULL_REQUEST_TEMPLATE.md
vendored
@ -1 +1 @@
|
||||
Fixes #ISSUE_NUMBER
|
||||
Fixes #{issue number}
|
||||
|
||||
9
.github/actionlint.yaml
vendored
9
.github/actionlint.yaml
vendored
@ -1,17 +1,8 @@
|
||||
self-hosted-runner:
|
||||
labels:
|
||||
- linux.20_04.4x
|
||||
- linux.20_04.16x
|
||||
- linux.large
|
||||
- linux.2xlarge
|
||||
- linux.4xlarge
|
||||
- linux.4xlarge.nvidia.gpu
|
||||
- linux.8xlarge.nvidia.gpu
|
||||
- linux.16xlarge.nvidia.gpu
|
||||
- windows.4xlarge
|
||||
- windows.8xlarge.nvidia.gpu
|
||||
- bm-runner
|
||||
- linux.rocm.gpu
|
||||
- macos-12-xl
|
||||
- macos-12
|
||||
- macos12.3-m1
|
||||
|
||||
82
.github/actions/build-android/action.yml
vendored
82
.github/actions/build-android/action.yml
vendored
@ -1,82 +0,0 @@
|
||||
name: build android
|
||||
|
||||
description: build android for a specific arch
|
||||
|
||||
inputs:
|
||||
arch:
|
||||
description: arch to build
|
||||
required: true
|
||||
arch-for-build-env:
|
||||
description: |
|
||||
arch to pass to build environment.
|
||||
This is currently different than the arch name we use elswhere, which
|
||||
should be fixed.
|
||||
required: true
|
||||
github-secret:
|
||||
description: github token
|
||||
required: true
|
||||
build-environment:
|
||||
required: true
|
||||
description: Top-level label for what's being built/tested.
|
||||
docker-image:
|
||||
required: true
|
||||
description: Name of the base docker image to build with.
|
||||
branch:
|
||||
required: true
|
||||
description: What branch we are building on.
|
||||
outputs:
|
||||
container_id:
|
||||
description: Docker container identifier used to build the artifacts
|
||||
value: ${{ steps.build.outputs.container_id }}
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: Build-${{ inputs.arch }}
|
||||
id: build
|
||||
shell: bash
|
||||
env:
|
||||
BRANCH: ${{ inputs.branch }}
|
||||
JOB_BASE_NAME: ${{ inputs.build-environment }}-build-and-test
|
||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-${{ inputs.arch-for-build-env }}-build"
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
CUSTOM_TEST_ARTIFACT_BUILD_DIR: build/custom_test_artifacts
|
||||
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
|
||||
DOCKER_IMAGE: ${{ inputs.docker-image }}
|
||||
MATRIX_ARCH: ${{ inputs.arch }}
|
||||
run: |
|
||||
# detached container should get cleaned up by teardown_ec2_linux
|
||||
set -exo pipefail
|
||||
export container_name
|
||||
container_name=$(docker run \
|
||||
-e BUILD_ENVIRONMENT \
|
||||
-e JOB_BASE_NAME \
|
||||
-e MAX_JOBS="$(nproc --ignore=2)" \
|
||||
-e AWS_DEFAULT_REGION \
|
||||
-e IS_GHA \
|
||||
-e PR_NUMBER \
|
||||
-e SHA1 \
|
||||
-e BRANCH \
|
||||
-e GITHUB_RUN_ID \
|
||||
-e SCCACHE_BUCKET \
|
||||
-e CUSTOM_TEST_ARTIFACT_BUILD_DIR \
|
||||
-e SKIP_SCCACHE_INITIALIZATION=1 \
|
||||
--env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
|
||||
--security-opt seccomp=unconfined \
|
||||
--cap-add=SYS_PTRACE \
|
||||
--tty \
|
||||
--detach \
|
||||
--user jenkins \
|
||||
-w /var/lib/jenkins/workspace \
|
||||
"${DOCKER_IMAGE}"
|
||||
)
|
||||
git submodule sync && git submodule update -q --init --recursive --depth 1 --jobs 0
|
||||
docker cp "${GITHUB_WORKSPACE}/." "${container_name}:/var/lib/jenkins/workspace"
|
||||
(echo "sudo chown -R jenkins . && .jenkins/pytorch/build.sh && find ${BUILD_ROOT} -type f -name "*.a" -or -name "*.o" -delete" | docker exec -u jenkins -i "${container_name}" bash) 2>&1
|
||||
|
||||
# Copy install binaries back
|
||||
mkdir -p "${GITHUB_WORKSPACE}/build_android_install_${MATRIX_ARCH}"
|
||||
docker cp "${container_name}:/var/lib/jenkins/workspace/build_android/install" "${GITHUB_WORKSPACE}/build_android_install_${MATRIX_ARCH}"
|
||||
echo "::set-output name=container_id::${container_name}"
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user