Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-28 10:34:54 +08:00)

Compare commits (61 commits):

```
56b43f4fec  6c394614f0  7c3c293ea7  9d43171746  f3c950e04e  b6f49807db
d84e05be49  c6139b7915  30baaef738  264d0ecf83  51233ea4b0  31a1a00ae8
bb98a99638  295c7cf1de  3233861ec4  47f4b3f7d4  e450f1498f  6fd01f9440
b33e434d55  a3e4bf60bb  e991cdaf58  4596a8ec8a  512f289884  c439f85b16
30712fca7e  debf62d95c  e30dc8d21b  4e590c9ced  6e9f2c8df0  37c1f4a7fe
49b74a52a4  11c78e9cb3  d6943ea58d  02b61b49ea  d553478c98  63333e2a25
8e7eebfc9a  f8afb8bdd0  0851cc42b0  804f7b6018  32758d30b3  bcb64a8084
f07991d396  c458cd4852  f7c4afc0f4  20554c00b6  3464d64f08  c6972eb3ac
25562d3d41  cd63c37bc6  c79decdbba  c307a3f336  f071020756  4f436f8570
ae11589710  9e5bcc1020  fa8578241d  1368809532  4073248fc2  75153cb730
5bb69b080c
```
@@ -1,63 +0,0 @@ (deleted file)

```yaml
# PyTorch CI Builds Pipeline on Azure DevOps
#
# This pipeline:
#   1) builds PyTorch on select configurations
#   2) runs only TestTorch unit tests.

stages:
- stage: 'Build'
  displayName: 'Build PyTorch'
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_ci_build: True
      os: ubuntu
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_ci_build: True
      os: ubuntu
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      build_stage: True
      is_ci_build: True
      os: windows
      cuda: cpu
      customMatrixes:
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      build_stage: True
      is_ci_build: True
      os: windows
      cuda: gpu
      customMatrixes:
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
```
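The templates referenced above all accept a `customMatrixes` mapping and splice it into the job's `strategy.matrix` via a template-expression `${{ insert }}` (visible in the template files further down). A minimal, self-contained sketch of that mechanism follows; the job name and variable values here are invented for illustration:

```yaml
# Hypothetical standalone pipeline: each key under `matrix` becomes one job leg,
# and its nested mapping becomes that leg's variables.
jobs:
- job: demo
  strategy:
    matrix:
      Py_38:                                   # leg name, as in the customMatrixes above
        configuration: ubuntu_1804_py_38_cpu   # available to steps as $(configuration)
  steps:
  - bash: echo "building $(configuration)"
    displayName: Show the matrix variable
```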
@@ -1,82 +0,0 @@ (deleted file)

```yaml
# PyTorch Daily Builds Pipeline on Azure DevOps
#
# This pipeline:
#   1) builds PyTorch on all available configurations
#   2) runs all PyTorch unit tests

stages:
- stage: 'BuildTest'
  displayName: 'Build and Test PyTorch'
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_daily_build: True
      os: ubuntu
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_daily_build: True
      os: ubuntu
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      build_stage: True
      is_daily_build: True
      os: windows
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      build_stage: True
      is_daily_build: True
      os: windows
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101
```
@@ -1,134 +0,0 @@ (deleted file)

```yaml
# PyTorch build steps template with Unix images Azure DevOps Instances
#
# This build depends on 3 parameters set as environment variables in the pipeline:
#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
#   - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
#   - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts

parameters:
  name: ''
  pool: ''
  container_endpoint: ''
  os: ''
  cuda: ''
  is_ci_build: False
  is_official_build: False
  is_daily_build: False
  build_stage: False
  verify_stage: False
  publish_stage: False
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 300
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  variables:
    DECODE_PERCENTS: false
  container:
    image: $[variables['container_image']]
    endpoint: ${{parameters.container_endpoint}}

  steps:
  # Build stage
  - ${{ if eq(parameters.build_stage, 'True') }}:
    # Set up environment variables for specific pipeline build
    - template: set-environment-variables.yml
      parameters:
        os: ${{ parameters.os}}
        cuda: ${{ parameters.cuda}}
        is_official_build: ${{ parameters.is_official_build}}

    # Sync and update PyTorch submodules
    - bash: git submodule update --init --recursive
      displayName: Update PyTorch submodules

    # Build PyTorch and run unit tests - no packaging
    - ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
      # Build PyTorch from source in develop mode
      - bash: python setup.py develop
        displayName: Build PyTorch from source

      - ${{ if eq(parameters.is_ci_build, 'True') }}:
        # Run TestTorch unit tests to demonstrate successful PyTorch build
        - bash: python test/test_torch.py TestTorch
          displayName: Run TestTorch unit tests

      - ${{ if eq(parameters.is_daily_build, 'True') }}:
        # Run all unit tests to demonstrate successful PyTorch build
        - bash: python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
          displayName: Run all unit tests

      # Run ComponentGovernance
      - task: ComponentGovernanceComponentDetection@0
        inputs:
          scanType: 'Register'
          verbosity: 'Verbose'
          alertWarningLevel: 'High'

    # Build PyTorch and produce artifacts for verification stage
    - ${{ if eq(parameters.is_official_build, 'True') }}:
      # Build PyTorch from source in install mode and exclude test binaries
      - bash: python setup.py install
        displayName: Build PyTorch from source without test binaries

      # Package PyTorch Wheel
      - bash: python setup.py bdist_wheel
        displayName: Package PyTorch Wheel

      # Publish PyTorch Wheel
      - task: PublishPipelineArtifact@1
        inputs:
          targetPath: $(Build.SourcesDirectory)/dist/
          artifactName: Build_$(Build.BuildNumber)_$(configuration)
        displayName: Publish PyTorch Wheel to Pipeline Artifacts

  # Verification stage
  - ${{ if eq(parameters.verify_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)/verify
      displayName: Download PyTorch Wheel

    # Install PyTorch Wheel
    - bash: python -m pip install $(Build.SourcesDirectory)/verify/torch*linux*.whl
      displayName: Install PyTorch Wheel

    # Ensure PyTorch installed correctly from produced wheel
    - bash: |
        cd $(Build.SourcesDirectory)/verify
        python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
      displayName: Check PyTorch correctly installed from wheel

  # Publishing stage
  - ${{ if eq(parameters.publish_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)/publish
      displayName: Download PyTorch Wheel

    # Publish wheel to Azure Artifacts
    # The flag continueOnError=true is needed as the artifact to be published
    # may already exist, because the artifact is differentiated based on the
    # last commit date.
    - bash: |
        export TORCH_VERSION=$(head -c 5 ./version.txt)
        export LAST_COMMIT=$(git rev-parse --short HEAD)
        export LAST_COMMIT_DATE=$(git log -1 --pretty=%ad --date=format:%Y%m%d)
        cd $(Build.SourcesDirectory)/publish
        export TORCH_WHEEL=$(echo torch*linux*whl)
        az extension add -n azure-devops
        echo $ADOTOKEN | az devops login
        az artifacts universal publish --organization $AZURE_DEVOPS_ARTIFACTS_ORGANIZATION --project $AZURE_DEVOPS_ARTIFACTS_PROJECT --scope project --feed "PyTorch" --name $TORCH_WHEEL --description "PyTorch Official Build Artifact" --version $TORCH_VERSION-$LAST_COMMIT_DATE-$LAST_COMMIT --path .
      env:
        ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
      continueOnError: true
      displayName: Upload PyTorch Official Build package to Azure Artifacts
```
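The publish step above versions the universal package as `<version>-<commit date>-<short sha>`. A small sketch of how that string is assembled, with hypothetical values in the comments (the `version.txt` contents and commit SHA are illustrative, not taken from this repo):

```yaml
- bash: |
    # version.txt starts with e.g. "1.9.0a0"; the first 5 bytes give "1.9.0"
    export TORCH_VERSION=$(head -c 5 ./version.txt)
    export LAST_COMMIT=$(git rev-parse --short HEAD)                          # e.g. 5bb69b0
    export LAST_COMMIT_DATE=$(git log -1 --pretty=%ad --date=format:%Y%m%d)   # e.g. 20210614
    echo "$TORCH_VERSION-$LAST_COMMIT_DATE-$LAST_COMMIT"                      # 1.9.0-20210614-5bb69b0
  displayName: Assemble the artifact version string
```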
@@ -1,150 +0,0 @@ (deleted file)

```yaml
# PyTorch build steps template with Windows images Azure DevOps Instances
#
# This build depends on 3 parameters set as environment variables in the pipeline:
#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
#   - AZURE_DEVOPS_ARTIFACTS_ORGANIZATION: Azure Artifacts Organization name to publish artifacts
#   - AZURE_DEVOPS_ARTIFACTS_PROJECT: Azure Artifacts Project name to publish artifacts

parameters:
  name: ''
  pool: ''
  os: ''
  cuda: ''
  is_ci_build: False
  is_official_build: False
  is_daily_build: False
  build_stage: False
  verify_stage: False
  publish_stage: False
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 300
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  variables:
    CMAKE_GENERATOR: Ninja
    PACKAGE_PDBS: 0

  steps:
  # Prepare for PyTorch build on Windows
  - template: prepare-build-template.yml
    parameters:
      configuration: $(configuration)
      build_stage: ${{ parameters.build_stage}}

  # Build Stage
  - ${{ if eq(parameters.build_stage, 'True') }}:
    # Set up environment variables for specific pipeline build
    - template: set-environment-variables.yml
      parameters:
        os: ${{ parameters.os}}
        cuda: ${{ parameters.cuda}}
        is_official_build: ${{ parameters.is_official_build}}

    # Sync and update PyTorch submodules
    - script: git submodule update --init --recursive
      displayName: Update PyTorch submodules

    # Build PyTorch and run unit tests - no packaging
    - ${{ if or(eq(parameters.is_ci_build, 'True'), eq(parameters.is_daily_build, 'True')) }}:
      # Build PyTorch from source in develop mode with Ninja
      - script: call activate $(configuration) && python setup.py develop
        displayName: Build PyTorch from source

      - ${{ if eq(parameters.is_ci_build, 'True') }}:
        # Run TestTorch unit tests to demonstrate successful PyTorch build
        - script: call activate $(configuration) && python test\test_torch.py TestTorch
          displayName: Run TestTorch unit tests

      - ${{ if eq(parameters.is_daily_build, 'True') }}:
        # Run all unit tests to demonstrate successful PyTorch build
        - script: call activate $(configuration) && python test/run_test.py --continue-through-error --exclude-jit-executor --verbose
          displayName: Run all unit tests

      # Run ComponentGovernance
      - task: ComponentGovernanceComponentDetection@0
        inputs:
          scanType: 'Register'
          verbosity: 'Verbose'
          alertWarningLevel: 'High'

    # Build PyTorch and produce artifacts for verification stage
    - ${{ if eq(parameters.is_official_build, 'True') }}:
      # Build PyTorch from source in install mode with Ninja and exclude test binaries
      - script: call activate $(configuration) && python setup.py install
        displayName: Build PyTorch from source without test binaries

      # Package PyTorch Wheel
      - script: call activate $(configuration) && python setup.py bdist_wheel
        displayName: Package PyTorch Wheel

      # Publish PyTorch Wheel
      - task: PublishPipelineArtifact@1
        inputs:
          targetPath: $(Build.SourcesDirectory)\dist\
          artifactName: Build_$(Build.BuildNumber)_$(configuration)
        displayName: Publish PyTorch Wheel to Pipeline Artifacts

  # Verification Stage
  - ${{ if eq(parameters.verify_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)\verify
      displayName: Download PyTorch Wheel

    # Install PyTorch Wheel on Windows
    - script: |
        call activate $(configuration)
        cd $(Build.SourcesDirectory)\verify
        dir torch*win*.whl /b > whl.txt
        set /p whl= < whl.txt
        python -m pip install %whl%
      displayName: Install PyTorch Wheel

    # Ensure PyTorch installed correctly from produced wheel
    - script: |
        call activate $(configuration)
        cd $(Build.SourcesDirectory)\verify
        python -c "import torch; print('Installed Torch version: ' + torch.__version__)"
      displayName: Check PyTorch correctly installed from wheel

  # Publishing stage
  - ${{ if eq(parameters.publish_stage, 'True') }}:
    # Download PyTorch Wheel
    - task: DownloadPipelineArtifact@2
      inputs:
        artifact: Build_$(Build.BuildNumber)_$(configuration)
        path: $(Build.SourcesDirectory)\publish
      displayName: Download PyTorch Wheel

    # Set up Azure Artifacts for Windows
    # The pip install --upgrade command is a bug fix for Azure CLI on Windows
    # More info: https://github.com/Azure/azure-cli/issues/16858
    - script: |
        pip install --upgrade pip --target \opt\az\lib\python3.6\site-packages\
        az extension add -n azure-devops
      displayName: Set up Azure Artifacts download on Windows

    # Publish wheel to Azure Artifacts
    # The flag continueOnError=true is needed as the artifact to be published
    # may already exist, because the artifact is differentiated based on the
    # last commit date.
    - script: |
        set /p TORCH_VERSION= < version.txt
        cd $(Build.SourcesDirectory)\publish
        git rev-parse --short HEAD > last_commit.txt && set /p LAST_COMMIT= < last_commit.txt
        git log -1 --pretty=%ad --date=format:%Y%m%d > last_commit_date.txt && set /p LAST_COMMIT_DATE= < last_commit_date.txt
        dir torch*win*.whl /b > whl.txt && set /p TORCH_WHEEL= < whl.txt
        echo %ADOTOKEN% | az devops login
        az artifacts universal publish --organization %AZURE_DEVOPS_ARTIFACTS_ORGANIZATION% --project %AZURE_DEVOPS_ARTIFACTS_PROJECT% --scope project --feed "PyTorch" --name %TORCH_WHEEL% --description "PyTorch Official Build Artifact" --version %TORCH_VERSION:~0,5%-%LAST_COMMIT_DATE%-%LAST_COMMIT% --path .
      env:
        ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
      continueOnError: true
      displayName: Upload PyTorch nightly package to Azure Artifacts
```
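The Windows publish step above relies on a cmd.exe idiom for capturing command output, since batch has no `$(...)` command substitution: redirect the output to a file, then read the first line back with `set /p`. A minimal sketch (the step itself is hypothetical):

```yaml
- script: |
    rem Capture `git rev-parse` output into an environment variable
    git rev-parse --short HEAD > last_commit.txt
    set /p LAST_COMMIT= < last_commit.txt
    echo commit is %LAST_COMMIT%
  displayName: Capture command output in cmd.exe
```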
@@ -1,17 +0,0 @@ (deleted file)

```yaml
dependencies:
  - python=PYTHON_VERSION
  - numpy
  - ninja
  - pyyaml
  - mkl
  - mkl-include
  - setuptools
  - cmake
  - cffi
  - typing_extensions
  - future
  - six
  - requests
  - dataclasses
  - pip:
    - -r ../../requirements.txt
```
@@ -1,62 +0,0 @@ (deleted file)

```yaml
# Build prepare steps for PyTorch on Azure DevOps to build from source.
# These steps are shared between the normal build process and Semmle security scan tasks.

parameters:
  build_stage: False
  configuration: ''

steps:
# End Python tasks that may be lingering over from previous runs.
# Note: If python.exe isn't currently running, the exit code becomes 128,
# which fails the run. Here the exit code is set to 0 to avoid a failed run.
- script: |
    taskkill /f /im python.exe
    IF %ERRORLEVEL% EQU 128 exit 0
  displayName: End previous Python processes

# Clean up env directory in conda for fresh builds and set up conda environment YAML
- powershell: |
    Remove-Item 'C:\Miniconda\envs' -Recurse -ErrorAction Ignore
    $env:PYTHON_VERSION = $env:SYSTEM_JOBNAME.Substring(3,1) + '.' + $env:SYSTEM_JOBNAME.Substring(4,1)
    (Get-Content .azure_pipelines\job_templates\common-packages.yml) -replace 'PYTHON_VERSION', $env:PYTHON_VERSION | Out-File -encoding ASCII .azure_pipelines\job_templates\common-packages.yml
  displayName: Clean up previous environments and set up conda environment YAML

# Make conda environment and install required packages
- script: |
    call conda clean --all -y
    call conda env create -n $(configuration) --file .azure_pipelines\job_templates\common-packages.yml
    call activate $(configuration)
    call conda install -c conda-forge libuv=1.39
  displayName: Set up conda environment for building from source

- ${{ if eq(parameters.build_stage, 'True') }}:
  # Install MKL
  - script: |
      rmdir /s /q mkl
      del mkl_2020.2.254.7z
      curl https://s3.amazonaws.com/ossci-windows/mkl_2020.2.254.7z -k -O
      7z x -aoa mkl_2020.2.254.7z -omkl
    displayName: Install MKL

  # Install sccache and randomtemp
  # Related PyTorch GitHub issue: https://github.com/pytorch/pytorch/issues/25393
  # Related fix: https://github.com/pytorch/builder/pull/448/
  - script: |
      mkdir .\tmp_bin
      curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output .\tmp_bin\sccache.exe
      curl -k https://s3.amazonaws.com/ossci-windows/sccache-cl.exe --output .\tmp_bin\sccache-cl.exe
      copy .\tmp_bin\sccache.exe .\tmp_bin\nvcc.exe
      curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.3/randomtemp.exe --output .\tmp_bin\randomtemp.exe
    displayName: Install sccache and randomtemp
    condition: not(eq(variables.CUDA_VERSION, ''))

  # CUDA 11.2's CUB directory conflicts with CUDA 10.2 and 10.1 builds,
  # where CUDA 11.2's CUB is injected into non-CUDA-11.2 builds.
  - powershell: Remove-Item "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include\cub" -Recurse -ErrorAction Ignore
    displayName: Remove conflicting CUB from CUDA installation
    condition: not(eq(variables.CUDA_VERSION, ''))

  - powershell: Copy-Item -Path "F:\cuda_11_2\cub\" -Destination "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\include" -Recurse
    displayName: Copy CUDA CUB for CUDA 11.2 build
    condition: eq(variables.CUDA_VERSION, '112')
```
@@ -1,51 +0,0 @@ (deleted file)

```yaml
# PyTorch build steps template with Unix images Azure DevOps Instances
#
# This build depends on 5 parameters set as environment variables in the pipeline:
#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
#   - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
#   - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests

parameters:
  name: ''
  pool: ''
  container_endpoint: ''
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 600
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}
  variables:
    DECODE_PERCENTS: false

  steps:
  # Don't checkout repo contents to save time and CPU compute. Environment variables
  # related to checkout branch such as $(BUILD_SOURCEBRANCH) are still available.
  - checkout: none

  # Delete pytorch_tests repo from previous builds if it exists
  - bash: rm -rf pytorch_tests/
    displayName: Delete pytorch_tests repo from previous builds if exists

  # Clone PyTorch Tests repository
  - bash: |
      B64_PAT=$(printf "%s"":$_ADOTOKEN" | base64)
      git -c http.extraHeader="Authorization: Basic ${B64_PAT}" clone $(AZURE_DEVOPS_PYTORCH_TESTS_REPO_URL)
      cd pytorch_tests
      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
    displayName: Clone PyTorch Tests repo

  # Run PyTorch Unit Tests
  - bash: bash $(Build.SourcesDirectory)/pytorch_tests/scripts/linux/run.sh
    env:
      _AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
      _TS_CLONE_P: $(TS_CLONE_PASSWORD)
      _TS_P: $(TS_PAT)
      _TS_SM_P: $(TS_SM_PAT)
    displayName: Run PyTorch Unit Tests
```
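The clone step above authenticates to Azure DevOps by Base64-encoding `:<PAT>` and sending it as an HTTP Basic auth header. Note that secret variables are not exposed to scripts automatically, which is why the template maps `_ADOTOKEN` explicitly under `env`. A sketch with placeholder names (`MY_PAT`, `REPO_URL`, and the secret variable are hypothetical):

```yaml
- bash: |
    # Basic auth for Azure DevOps git over HTTPS: empty username, PAT as password
    B64_PAT=$(printf "%s" ":$MY_PAT" | base64)
    git -c http.extraHeader="Authorization: Basic ${B64_PAT}" clone "$REPO_URL"
  env:
    MY_PAT: $(SOME_SECRET_PAT)   # secrets must be mapped into env explicitly
  displayName: Clone a private repo with a PAT
```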
@@ -1,49 +0,0 @@ (deleted file)

```yaml
# PyTorch build steps template with Windows images Azure DevOps Instances
#
# This build depends on 5 parameters set as environment variables in the pipeline:
#   - AZURE_DEVOPS_CLI_PAT: Secret var for authenticating to Azure DevOps
#   - AZURE_STORAGE_KEY: Secret var for authenticating to Azure Storage
#   - _TS_CLONE_P, _TS_P, _TS_SM_P: Secret vars for specific unit tests

parameters:
  name: ''
  pool: ''
  customMatrixes: ''

jobs:
- job: ${{parameters.name}}
  timeoutInMinutes: 600
  strategy:
    matrix:
      ${{ insert }}: ${{parameters.customMatrixes}}
  pool:
    name: ${{ parameters.pool}}

  steps:
  # Don't checkout repo contents to save time and CPU compute. Environment variables
  # related to checkout branch such as $(BUILD_SOURCEBRANCH) are still available.
  - checkout: none

  # Delete pytorch_tests repo from previous builds if it exists
  - script: if exist "pytorch_tests/" rmdir "pytorch_tests/" /q /s
    displayName: Delete pytorch_tests repo from previous builds if exists

  # Clone PyTorch Tests repository
  - powershell: |
      $env:B64Pat = [Convert]::ToBase64String([System.Text.Encoding]::UTF8.GetBytes(":$env:_ADOTOKEN"))
      git -c http.extraHeader="Authorization: Basic $env:B64Pat" clone $env:AZURE_DEVOPS_pytorch_tests_REPO_URL
      cd pytorch_tests
      git checkout $(PYTORCH_TESTS_CHECKOUT_BRANCH)
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
    displayName: Clone PyTorch Tests repo

  # Run PyTorch Unit Tests
  - script: call $(Build.SourcesDirectory)\pytorch_tests\scripts\windows\run.bat
    env:
      _ADOTOKEN: $(AZURE_DEVOPS_CLI_PAT)
      _AZURE_STORAGE_KEY: $(AZURE_STORAGE_KEY)
      _TS_CLONE_P: $(TS_CLONE_PASSWORD)
      _TS_P: $(TS_PAT)
      _TS_SM_P: $(TS_SM_PAT)
    displayName: Run PyTorch Unit Tests
```
@@ -1,131 +0,0 @@ (deleted file)

```yaml
# Set environment variables for specific configurations

parameters:
  is_official_build: False
  os: ''
  cuda: ''

steps:
# Environment configuration steps for Ubuntu builds
- ${{ if contains(parameters.os, 'ubuntu') }}:
  # Set configuration specific build flags
  - ${{ if eq(parameters.is_official_build, True) }}:
    - bash: |
        echo "##vso[task.setvariable variable=INSTALL_TEST;]0"
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
        export PYTORCH_VERSION=$(head -c 5 ./version.txt)
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
      displayName: Set configuration-specific build flags

  # Set PyTorch CPU/GPU build flags
  - ${{ if contains(parameters.cuda, 'cpu') }}:
    - bash: |
        echo "##vso[task.setvariable variable=USE_CUDA;]0"
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
      displayName: Set CUDA-specific build flag for CPU builds

  - ${{ if contains(parameters.cuda, 'gpu') }}:
    - bash: |
        echo "##vso[task.setvariable variable=USE_CUDA;]1"
        echo "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
      displayName: Set CUDA-specific build flag for GPU builds

  # Set MKL environment variables
  - bash: |
      echo "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]/opt/intel/lib:$CMAKE_LIBRARY_PATH"
      echo "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]/opt/intel/include:$CMAKE_INCLUDE_PATH"
    displayName: Set MKL paths

  # View current environment variables
  - bash:
      printenv
    displayName: Show environment variables

# Environment configuration steps for Windows builds
- ${{ if contains(parameters.os, 'windows') }}:
  # Set Conda Lib Path
  - powershell: Write-Host "##vso[task.setvariable variable=CONDA_LIB_PATH;]C:\Miniconda\envs\$(configuration)\Library\bin"
    displayName: Set Conda Lib Path

  # Set configuration specific build flags
  - ${{ if eq(parameters.is_official_build, True) }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=INSTALL_TEST;]0"
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_NUMBER;]1"
        Set-Variable -Name PYTORCH_VERSION -Value (Get-Content .\version.txt).Substring(0,5)
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$PYTORCH_VERSION.dev"
      displayName: Set configuration-specific build flags

  # Set PyTorch CPU/GPU build flags
  - ${{ if contains(parameters.cuda, 'cpu') }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=USE_CUDA;]0"
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cpu"
      displayName: Set CUDA-specific build flag for CPU build

  - ${{ if contains(parameters.cuda, 'gpu') }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=USE_CUDA;]1"
        Write-Host "##vso[task.setvariable variable=PYTORCH_BUILD_VERSION;]$(PYTORCH_BUILD_VERSION).cu$(CUDA_VERSION)"
      displayName: Set CUDA-specific build flag for GPU build

  # Set CUDA 11.2, 10.2 or 10.1 specific build flags
  - ${{ if eq(parameters.cuda, 'gpu') }}:
    - powershell: |
        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5;8.0;8.6"
        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\"
      displayName: Set CUDA 11.2 specific build flags
      condition: eq(variables.CUDA_VERSION, '112')

    - powershell: |
        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.2\"
      displayName: Set CUDA 10.2 specific build flags
      condition: eq(variables.CUDA_VERSION, '102')

    - powershell: |
        Write-Host "##vso[task.setvariable variable=TORCH_CUDA_ARCH_LIST;]3.7+PTX;5.0;6.0;6.1;7.0;7.5"
        Write-Host "##vso[task.setvariable variable=CUDA_PATH;]C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\"
      displayName: Set CUDA 10.1 specific build flags
      condition: eq(variables.CUDA_VERSION, '101')

    - powershell: |
        Write-Host "##vso[task.setvariable variable=CUDA_BIN_PATH;]$env:CUDA_PATH\bin\"
        Write-Host "##vso[task.setvariable variable=CUDNN_ROOT;]$env:CUDA_PATH"
        Write-Host "##vso[task.setvariable variable=CUDNN_INCLUDE_DIR;]$env:CUDA_PATH\include\"
        Write-Host "##vso[task.setvariable variable=CUDNN_LIBRARY;]$env:CUDA_PATH\lib\x64\"
        Write-Host "##vso[task.prependpath]$env:CUDA_PATH\bin"
        Write-Host "##vso[task.setvariable variable=TORCH_NVCC_FLAGS;]-Xfatbin -compress-all --no-host-device-move-forward"
        Write-Host "##vso[task.setvariable variable=THRUST_IGNORE_CUB_VERSION_CHECK;]1"
        Write-Host "##vso[task.setvariable variable=NVTOOLSEXT_PATH;]C:\Program Files\NVIDIA Corporation\NvToolsExt\"
      displayName: Set CUDA environment variables

    - powershell: |
        copy "$(CUDA_BIN_PATH)\cusparse*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cublas*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cudart*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\curand*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cufft*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cusolver*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\cudnn*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CUDA_BIN_PATH)\nvrtc*64_*.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64\nvToolsExt64_1.dll*" $(Build.SourcesDirectory)\torch\lib
        copy "$(CONDA_LIB_PATH)\libiomp*5md.dll" $(Build.SourcesDirectory)\torch\lib
        copy "$(CONDA_LIB_PATH)\uv.dll" $(Build.SourcesDirectory)\torch\lib
      displayName: Copy CUDA/cuDNN/libomp/libuv dlls to torch\lib

  # Set MKL, sccache and randomtemp environment variables
  - powershell: |
      Write-Host "##vso[task.setvariable variable=CMAKE_INCLUDE_PATH;]$(Build.SourcesDirectory)\mkl\include"
      Write-Host "##vso[task.setvariable variable=CMAKE_LIBRARY_PATH;]$(Build.SourcesDirectory)\mkl\lib;$env:CMAKE_LIBRARY_PATH"
      Write-Host "##vso[task.setvariable variable=ADDITIONAL_PATH;]$(Build.SourcesDirectory)\tmp_bin"
      Write-Host "##vso[task.setvariable variable=SCCACHE_IDLE_TIMEOUT;]1500"
      Write-Host "##vso[task.setvariable variable=RANDOMTEMP_EXECUTABLE;]$(Build.SourcesDirectory)\tmp_bin\nvcc.exe"
      Write-Host "##vso[task.setvariable variable=CUDA_NVCC_EXECUTABLE;]$(Build.SourcesDirectory)\tmp_bin\randomtemp.exe"
      Write-Host "##vso[task.setvariable variable=RANDOMTEMP_BASEDIR;]$(Build.SourcesDirectory)\tmp_bin"
    displayName: Set MKL, sccache and randomtemp environment variables

  # View current environment variables
  - script:
      set
    displayName: Show environment variables
```
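All of the `##vso[task.setvariable ...]` lines above are Azure DevOps logging commands: a step writes the directive to stdout and the agent turns it into a pipeline variable for subsequent steps. A minimal sketch of the mechanism (the variable name is hypothetical):

```yaml
steps:
- bash: echo "##vso[task.setvariable variable=MY_FLAG;]1"
  displayName: Set MY_FLAG for later steps
- bash: echo "MY_FLAG is $(MY_FLAG)"   # macro-expands to 1 in this later step
  displayName: Read MY_FLAG
```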
@@ -1,14 +0,0 @@ (deleted file)

```yaml
# Main logic to initiate wait for PR artifact to be ready

steps:
- task: InvokeRESTAPI@1
  displayName: 'Wait for job success and wheel ready'
  timeoutInMinutes: 60
  inputs:
    connectionType: 'connectedServiceName'
    serviceConnection: circleciconn
    method: 'POST'
    headers: '{"Content-Type":"application/json", "BranchName":"$(TARGET_BRANCH_TO_CHECK_PR)", "JobName":"$(TARGET_CIRCLECI_PR)", "PlanUrl":"$(System.CollectionUri)", "ProjectId":"$(System.TeamProjectId)", "HubName":"$(System.HostType)", "PlanId":"$(System.PlanId)", "JobId":"$(System.JobId)", "TimelineId":"$(System.TimelineId)", "TaskInstanceId":"$(System.TaskInstanceId)", "AuthToken":"$(System.AccessToken)"}'
    body: ''
    urlSuffix: 'api/JobStatus'
    waitForCompletion: true
```
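Worth noting about the task above: `InvokeRESTAPI@1` runs on an agentless (server) job, and with `waitForCompletion: true` it does not finish when the HTTP call returns; the external service is expected to use the posted plan/job identifiers and `$(System.AccessToken)` to call back into Azure DevOps and mark the task complete once the CircleCI wheel is ready.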
@@ -1,49 +0,0 @@ (deleted file)

```yaml
# Initiate 5 agentless-server waiting jobs to check on the
# status of PR artifact builds, for a maximum wait time of
# 5 * 60 min = 300 minutes. These jobs will pass immediately
# once the targeted CircleCI build is ready.

jobs:
- job: checkjob1
  pool: server
  timeoutInMinutes: 60
  continueOnError: true

  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob2
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob1
  continueOnError: true

  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob3
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob2
  continueOnError: true

  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob4
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob3
  continueOnError: true

  steps:
  - template: wheel-wait-job-template.yml

- job: checkjob5
  pool: server
  timeoutInMinutes: 60
  dependsOn: checkjob4
  continueOnError: true

  steps:
  - template: wheel-wait-job-template.yml
```
@@ -1,50 +0,0 @@ (deleted file)

```yaml
# PyTorch Nightly PyTorch Tests Builds Pipeline on Azure DevOps
#
# This pipeline runs custom PyTorch unit tests on nightly
# PyTorch wheels.

stages:
- stage: 'NightlyCustomTests'
  displayName: 'Run custom unit tests on PyTorch wheels'
  jobs:
  - template: job_templates/pytorch-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: $(BUILD_POOL_LIN_1)
      customMatrixes:
        Nightly_Custom_Tests:
          _DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_1)
          _PYTHON_VERSION: $(PYTHON_VERSION_LIN_1)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_1)
          _RUN_TESTS: $(RUN_TESTS_LIN)

  - template: job_templates/pytorch-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: $(BUILD_POOL_LIN_2)
      customMatrixes:
        Nightly_Custom_Tests:
          _DOCKER_IMAGE: $(DOCKER_IMAGE_LIN_2)
          _PYTHON_VERSION: $(PYTHON_VERSION_LIN_2)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_LIN_2)
          _RUN_TESTS: $(RUN_TESTS_LIN)

  - template: job_templates/pytorch-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: $(BUILD_POOL_WIN_1)
      customMatrixes:
        Nightly_Custom_Tests:
          _PYTHON_VERSION: $(PYTHON_VERSION_WIN_1)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_1)
          _RUN_TESTS: $(RUN_TESTS_WIN)

  - template: job_templates/pytorch-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: $(BUILD_POOL_WIN_2)
      customMatrixes:
        Nightly_Custom_Tests:
          _PYTHON_VERSION: $(PYTHON_VERSION_WIN_2)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_WIN_2)
          _RUN_TESTS: $(RUN_TESTS_WIN)
```
@@ -1,30 +0,0 @@ (deleted file)

```yaml
# PyTorch PR PyTorch Tests Builds Pipeline on Azure DevOps
#
# This pipeline:
#   1) ensures that CircleCI builds for a given PR
#      have finished, and that its artifacts are
#      ready for download
#   2) runs custom PyTorch unit tests on PyTorch
#      wheels generated during PR builds.

stages:
- stage: 'EnsureArtifactsReady'
  displayName: 'Ensure PyTorch PR Artifacts are ready'
  jobs:
  - template: job_templates/wheel-wait-template.yml

- stage: 'PRCustomTests'
  displayName: 'Run custom unit tests on PyTorch wheels'
  jobs:
  - template: job_templates/pytorch-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: $(BUILD_POOL_PR)
      customMatrixes:
        PR_Custom_Tests:
          _PYTHON_VERSION: $(PYTHON_VERSION_PR)
          _CUDA_BUILD_VERSION: $(CUDA_BUILD_VERSION_PR)
          _TARGET_CIRCLECI_BUILD: $(TARGET_CIRCLECI_PR)
          _TARGET_BRANCH_TO_CHECK: $(TARGET_BRANCH_TO_CHECK_PR)
          _DOCKER_IMAGE: $(DOCKER_IMAGE_PR)
          _RUN_TESTS: $(RUN_TESTS_PR)
```
@@ -1,224 +0,0 @@ (deleted file)

```yaml
# PyTorch Official Builds Pipeline on Azure DevOps
#
# This pipeline:
#   1) builds PyTorch on all available configurations
#   2) verifies PyTorch artifacts by installing them in a clean environment
#      and checking torch.__version__
#   3) publishes official PyTorch artifacts to Azure DevOps Artifacts for consumption

stages:
- stage: 'Build'
  displayName: 'Build PyTorch'
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_official_build: True
      os: ubuntu
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      build_stage: True
      is_official_build: True
      os: ubuntu
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      build_stage: True
      is_official_build: True
      os: windows
      cuda: cpu
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      build_stage: True
      is_official_build: True
      os: windows
      cuda: gpu
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101

- stage: 'Verify'
  displayName: 'Verify PyTorch wheels'
  dependsOn: Build
  condition: succeeded()
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      verify_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101

- stage: 'Publish'
  displayName: 'Publish PyTorch wheels'
  dependsOn: Verify
  condition: succeeded()
  jobs:
  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_CPU_docker
      pool: 'PyTorch-Linux-CPU'
      container_endpoint: pytorchms.azurecr.io
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: ubuntu_1804_py_38_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cpu_dev
        Py_37:
          configuration: ubuntu_1804_py_37_cpu
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cpu_dev

  - template: job_templates/build-verify-publish-template-unix.yml
    parameters:
      name: ubuntu_1804_GPU_docker
      pool: 'PyTorch-Linux-GPU'
      container_endpoint: pytorchms.azurecr.io
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: ubuntu_1804_py_39_cuda_112_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_39_cuda_112_cudnn_8_dev
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_810:
          configuration: ubuntu_1804_py_38_cuda_102_cudnn_810
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_38_cuda_102_cudnn_8_dev
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_765:
          configuration: ubuntu_1804_py_37_cuda_101_cudnn_765
          container_image: pytorchms.azurecr.io/ubuntu_1804_py_37_cuda_101_cudnn_7_dev
          CUDA_VERSION: 101

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_CPU
      pool: 'PyTorch-Win-CPU'
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_38:
          configuration: windows_2019_py_38_cpu
        Py_37:
          configuration: windows_2019_py_37_cpu

  - template: job_templates/build-verify-publish-template-win.yml
    parameters:
      name: windows_2019_GPU
      pool: 'PyTorch-Win-GPU'
      publish_stage: True
      is_official_build: True
      customMatrixes:
        Py_39_CUDA_112_cuDNN_810:
          configuration: windows_2019_py_39_cuda_112_cudnn_810
          CUDA_VERSION: 112
        Py_38_CUDA_102_cuDNN_765:
          configuration: windows_2019_py_38_cuda_102_cudnn_765
          CUDA_VERSION: 102
        Py_37_CUDA_101_cuDNN_764:
          configuration: windows_2019_py_37_cuda_101_cudnn_764
          CUDA_VERSION: 101
```
```diff
@@ -55,15 +55,14 @@ CONFIG_TREE_DATA = OrderedDict(
     macos_arm64=([None], OrderedDict(
         wheel=[
             "3.8",
-            "3.9",
         ],
         conda=[
             "3.8",
-            "3.9",
         ],
     )),
+    # Skip CUDA-9.2 builds on Windows
     windows=(
-        [v for v in dimensions.GPU_VERSIONS if v not in dimensions.ROCM_VERSION_LABELS],
+        [v for v in dimensions.GPU_VERSIONS if v not in ['cuda92'] + dimensions.ROCM_VERSION_LABELS],
         OrderedDict(
             wheel=dimensions.STANDARD_PYTHON_VERSIONS,
             conda=dimensions.STANDARD_PYTHON_VERSIONS,
```
```diff
@@ -27,19 +27,7 @@ class Conf(object):
 
     def gen_docker_image(self):
         if self.gcc_config_variant == 'gcc5.4_cxx11-abi':
-            if self.gpu_version is None:
-                return miniutils.quote("pytorch/libtorch-cxx11-builder:cpu")
-            else:
-                return miniutils.quote(
-                    f"pytorch/libtorch-cxx11-builder:{self.gpu_version}"
-                )
-        if self.pydistro == "conda":
-            if self.gpu_version is None:
-                return miniutils.quote("pytorch/conda-builder:cpu")
-            else:
-                return miniutils.quote(
-                    f"pytorch/conda-builder:{self.gpu_version}"
-                )
+            return miniutils.quote("pytorch/pytorch-binary-docker-image-ubuntu16.04:latest")
 
         docker_word_substitution = {
             "manywheel": "manylinux",
```
```diff
@@ -1,14 +1,14 @@
 PHASES = ["build", "test"]
 
 CUDA_VERSIONS = [
+    "101",
     "102",
     "111",
 ]
 
 ROCM_VERSIONS = [
+    "3.10",
     "4.0.1",
-    "4.1",
-    "4.2",
 ]
 
 ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]
```
```diff
@@ -32,9 +32,24 @@ CONFIG_TREE_DATA = [
         ]),
     ]),
     ("cuda", [
+        ("9.2", [
+            ("3.6", [
+                X(True),
+                ("cuda_gcc_override", [
+                    ("gcc5.4", [
+                        ('build_only', [XImportant(True)]),
+                    ]),
+                ]),
+            ])
+        ]),
+        ("10.1", [
+            ("3.6", [
+                ('build_only', [X(True)]),
+            ]),
+        ]),
         ("10.2", [
             ("3.6", [
-                ("shard_test", [X(True)]),
+                ("shard_test", [XImportant(True)]),
                 ("libtorch", [
                     (True, [
                         ('build_only', [X(True)]),
```
```diff
@@ -44,10 +59,10 @@ CONFIG_TREE_DATA = [
         ]),
         ("11.1", [
             ("3.8", [
-                ("shard_test", [XImportant(True)]),
+                X(True),
                 ("libtorch", [
                     (True, [
-                        ('build_only', [X(True)]),
+                        ('build_only', [XImportant(True)]),
                     ]),
                 ]),
             ]),
```
```diff
@@ -57,9 +72,7 @@ CONFIG_TREE_DATA = [
     ("bionic", [
         ("clang", [
             ("9", [
-                ("3.6", [
-                    ("noarch", [XImportant(True)]),
-                ]),
+                XImportant("3.6"),
             ]),
             ("9", [
                 ("3.6", [
```
```diff
@@ -68,13 +81,6 @@ CONFIG_TREE_DATA = [
             ]),
         ]),
     ]),
-    ("cuda", [
-        ("10.2", [
-            ("3.9", [
-                ("shard_test", [XImportant(True)]),
-            ]),
-        ]),
-    ]),
     ("gcc", [
         ("9", [
             ("3.8", [
```
```diff
@@ -145,8 +151,6 @@ class PyVerConfigNode(TreeConfigNode):
     def init2(self, node_name):
         self.props["pyver"] = node_name
         self.props["abbreviated_pyver"] = get_major_pyver(node_name)
-        if node_name == "3.9":
-            self.props["abbreviated_pyver"] = "py3.9"
 
     # noinspection PyMethodMayBeStatic
     def child_constructor(self):
```
```diff
@@ -163,10 +167,8 @@ class ExperimentalFeatureConfigNode(TreeConfigNode):
         next_nodes = {
             "asan": AsanConfigNode,
             "xla": XlaConfigNode,
-            "mlc": MLCConfigNode,
             "vulkan": VulkanConfigNode,
             "parallel_tbb": ParallelTBBConfigNode,
-            "noarch": NoarchConfigNode,
             "parallel_native": ParallelNativeConfigNode,
             "onnx": ONNXConfigNode,
             "libtorch": LibTorchConfigNode,
```
```diff
@@ -201,16 +203,6 @@ class XlaConfigNode(TreeConfigNode):
     def child_constructor(self):
         return ImportantConfigNode
 
-class MLCConfigNode(TreeConfigNode):
-    def modify_label(self, label):
-        return "MLC=" + str(label)
-
-    def init2(self, node_name):
-        self.props["is_mlc"] = node_name
-
-    def child_constructor(self):
-        return ImportantConfigNode
-
 
 class AsanConfigNode(TreeConfigNode):
     def modify_label(self, label):
```
@ -256,14 +248,6 @@ class ParallelTBBConfigNode(TreeConfigNode):
|
|||||||
return ImportantConfigNode
|
return ImportantConfigNode
|
||||||
|
|
||||||
|
|
||||||
class NoarchConfigNode(TreeConfigNode):
|
|
||||||
def init2(self, node_name):
|
|
||||||
self.props["is_noarch"] = node_name
|
|
||||||
|
|
||||||
def child_constructor(self):
|
|
||||||
return ImportantConfigNode
|
|
||||||
|
|
||||||
|
|
||||||
class ParallelNativeConfigNode(TreeConfigNode):
|
class ParallelNativeConfigNode(TreeConfigNode):
|
||||||
def modify_label(self, label):
|
def modify_label(self, label):
|
||||||
return "PARALLELNATIVE=" + str(label)
|
return "PARALLELNATIVE=" + str(label)
|
||||||
|
|||||||
@ -273,7 +273,6 @@ def instantiate_configs():
|
|||||||
is_xla = fc.find_prop("is_xla") or False
|
is_xla = fc.find_prop("is_xla") or False
|
||||||
is_asan = fc.find_prop("is_asan") or False
|
is_asan = fc.find_prop("is_asan") or False
|
||||||
is_coverage = fc.find_prop("is_coverage") or False
|
is_coverage = fc.find_prop("is_coverage") or False
|
||||||
is_noarch = fc.find_prop("is_noarch") or False
|
|
||||||
is_onnx = fc.find_prop("is_onnx") or False
|
is_onnx = fc.find_prop("is_onnx") or False
|
||||||
is_pure_torch = fc.find_prop("is_pure_torch") or False
|
is_pure_torch = fc.find_prop("is_pure_torch") or False
|
||||||
is_vulkan = fc.find_prop("is_vulkan") or False
|
is_vulkan = fc.find_prop("is_vulkan") or False
|
||||||
@ -317,9 +316,6 @@ def instantiate_configs():
|
|||||||
parms_list_ignored_for_docker_image.append("coverage")
|
parms_list_ignored_for_docker_image.append("coverage")
|
||||||
python_version = fc.find_prop("pyver")
|
python_version = fc.find_prop("pyver")
|
||||||
|
|
||||||
if is_noarch:
|
|
||||||
parms_list_ignored_for_docker_image.append("noarch")
|
|
||||||
|
|
||||||
if is_onnx:
|
if is_onnx:
|
||||||
parms_list.append("onnx")
|
parms_list.append("onnx")
|
||||||
python_version = fc.find_prop("pyver")
|
python_version = fc.find_prop("pyver")
|
||||||
|
|||||||
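The CONFIG_TREE_DATA hunks above edit a nested tuple tree in which each `(label, children)` pair is one axis of the build matrix (distro, compiler, CUDA version, Python version, feature flags). As a rough illustration only — this is not the cimodel API, just a hypothetical sketch of how such a tree flattens into root-to-leaf configurations:

```python
# Hypothetical sketch: flatten a CONFIG_TREE_DATA-style tuple tree into
# root-to-leaf paths. X/XImportant leaves are represented here by plain values.
def walk(node, prefix=()):
    if isinstance(node, tuple) and len(node) == 2 and isinstance(node[1], list):
        label, children = node
        for child in children:
            yield from walk(child, prefix + (label,))
    else:
        yield prefix + (node,)  # leaf marker, e.g. X(True)

tree = ("cuda", [("10.2", [("3.6", [("shard_test", [True])])])])
print(list(walk(tree)))
# [('cuda', '10.2', '3.6', 'shard_test', True)]
```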
@@ -2,7 +2,6 @@ import cimodel.data.simple.util.branch_filters as branch_filters
 from cimodel.data.simple.util.docker_constants import (
     DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK
 )
-import cimodel.lib.miniutils as miniutils
 
 
 class AndroidJob:
@@ -52,15 +51,13 @@ class AndroidGradleJob:
                  template_name,
                  dependencies,
                  is_master_only=True,
-                 is_pr_only=False,
-                 extra_props=tuple()):
+                 is_pr_only=False):
 
         self.job_name = job_name
         self.template_name = template_name
         self.dependencies = dependencies
         self.is_master_only = is_master_only
         self.is_pr_only = is_pr_only
-        self.extra_props = dict(extra_props)
 
     def gen_tree(self):
 
@@ -73,8 +70,6 @@ class AndroidGradleJob:
             props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
         elif self.is_pr_only:
            props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.PR_BRANCH_LIST)
-        if self.extra_props:
-            props_dict.update(self.extra_props)
 
         return [{self.template_name: props_dict}]
 
@@ -96,15 +91,6 @@ WORKFLOW_DATA = [
         [DOCKER_REQUIREMENT_NDK],
         is_master_only=False,
         is_pr_only=True),
-    AndroidGradleJob(
-        "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single-full-jit",
-        "pytorch_android_gradle_custom_build_single",
-        [DOCKER_REQUIREMENT_NDK],
-        is_master_only=False,
-        is_pr_only=True,
-        extra_props=tuple({
-            "lite_interpreter": miniutils.quote(str(int(False)))
-        }.items())),
     AndroidGradleJob(
         "pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-build",
         "pytorch_android_gradle_build",
@@ -77,7 +77,7 @@ WORKFLOW_DATA = [
         ["libtorch", "3.7m", "cpu", "devtoolset7"],
         "pytorch/manylinux-cuda102",
         "binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build",
-        is_master_only=True,
+        is_master_only=False,
         has_libtorch_variant=True,
     ),
     SmoketestJob(
@@ -109,14 +109,14 @@ WORKFLOW_DATA = [
         ["libtorch", "3.7", "cpu", "debug"],
         None,
         "binary_windows_libtorch_3_7_cpu_debug_build",
-        is_master_only=True,
+        is_master_only=False,
     ),
     SmoketestJob(
         "binary_windows_build",
         ["libtorch", "3.7", "cpu", "release"],
         None,
         "binary_windows_libtorch_3_7_cpu_release_build",
-        is_master_only=True,
+        is_master_only=False,
     ),
     SmoketestJob(
         "binary_windows_build",
@@ -131,7 +131,7 @@ WORKFLOW_DATA = [
         ["libtorch", "3.7", "cpu", "debug"],
         None,
         "binary_windows_libtorch_3_7_cpu_debug_test",
-        is_master_only=True,
+        is_master_only=False,
         requires=["binary_windows_libtorch_3_7_cpu_debug_build"],
     ),
     SmoketestJob(
@@ -173,7 +173,7 @@ WORKFLOW_DATA = [
         ["libtorch", "3.7m", "cpu", "devtoolset7"],
         "pytorch/manylinux-cuda102",
         "binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_test",
-        is_master_only=True,
+        is_master_only=False,
         requires=["binary_linux_libtorch_3_7m_cpu_devtoolset7_shared-with-deps_build"],
         has_libtorch_variant=True,
     ),
@@ -182,7 +182,7 @@ WORKFLOW_DATA = [
         ["libtorch", "3.7m", "cpu", "gcc5.4_cxx11-abi"],
         "pytorch/pytorch-binary-docker-image-ubuntu16.04:latest",
         "binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_test",
-        is_master_only=True,
+        is_master_only=False,
         requires=["binary_linux_libtorch_3_7m_cpu_gcc5_4_cxx11-abi_shared-with-deps_build"],
         has_libtorch_variant=True,
     ),
@@ -6,16 +6,21 @@ from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN
 
 # TODO: make this generated from a matrix rather than just a static list
 IMAGE_NAMES = [
+    "pytorch-linux-bionic-cuda11.1-cudnn8-py3.6-gcc9",
+    "pytorch-linux-bionic-cuda11.1-cudnn8-py3.8-gcc9",
+    "pytorch-linux-bionic-cuda11.0-cudnn8-py3.6-gcc9",
+    "pytorch-linux-bionic-cuda11.0-cudnn8-py3.8-gcc9",
     "pytorch-linux-bionic-cuda10.2-cudnn7-py3.8-gcc9",
-    "pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7",
     "pytorch-linux-bionic-py3.6-clang9",
     "pytorch-linux-bionic-cuda10.2-cudnn7-py3.6-clang9",
     "pytorch-linux-bionic-py3.8-gcc9",
     "pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7",
     "pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7",
     "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7",
+    "pytorch-linux-xenial-cuda11.0-cudnn8-py3-gcc7",
     "pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
-    "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7",
+    "pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc5.4",
+    "pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7",
     "pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
     "pytorch-linux-xenial-py3-clang5-asan",
     "pytorch-linux-xenial-py3-clang7-onnx",
@@ -25,9 +30,7 @@ IMAGE_NAMES = [
     "pytorch-linux-xenial-py3.6-gcc7.2",
     "pytorch-linux-xenial-py3.6-gcc7",
     "pytorch-linux-bionic-rocm3.9-py3.6",
-    "pytorch-linux-bionic-rocm4.0.1-py3.6",
-    "pytorch-linux-bionic-rocm4.1-py3.6",
-    "pytorch-linux-bionic-rocm4.2-py3.6",
+    "pytorch-linux-bionic-rocm3.10-py3.6",
 ]
 
 
@@ -61,20 +61,10 @@ class IOSJob:
 
 
 WORKFLOW_DATA = [
-    IOSJob(XCODE_VERSION, ArchVariant("x86_64"), is_org_member_context=False, extra_props={
-        "lite_interpreter": miniutils.quote(str(int(True)))}),
-    IOSJob(XCODE_VERSION, ArchVariant("x86_64", "full_jit"), is_org_member_context=False, extra_props={
-        "lite_interpreter": miniutils.quote(str(int(False)))}),
-    IOSJob(XCODE_VERSION, ArchVariant("arm64"), extra_props={
-        "lite_interpreter": miniutils.quote(str(int(True)))}),
-    IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={
-        "use_metal": miniutils.quote(str(int(True))),
-        "lite_interpreter": miniutils.quote(str(int(True)))}),
-    IOSJob(XCODE_VERSION, ArchVariant("arm64", "full_jit"), extra_props={
-        "lite_interpreter": miniutils.quote(str(int(False)))}),
-    IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom"), extra_props={
-        "op_list": "mobilenetv2.yaml",
-        "lite_interpreter": miniutils.quote(str(int(True)))}),
+    IOSJob(XCODE_VERSION, ArchVariant("x86_64"), is_org_member_context=False),
+    IOSJob(XCODE_VERSION, ArchVariant("arm64")),
+    IOSJob(XCODE_VERSION, ArchVariant("arm64", "metal"), extra_props={"use_metal": miniutils.quote(str(int(True)))}),
+    IOSJob(XCODE_VERSION, ArchVariant("arm64", "custom"), extra_props={"op_list": "mobilenetv2.yaml"}),
 ]
 
 
@@ -1,22 +1,14 @@
 class MacOsJob:
-    def __init__(self, os_version, is_build=False, is_test=False, extra_props=tuple()):
-        # extra_props is tuple type, because mutable data structures for argument defaults
-        # is not recommended.
+    def __init__(self, os_version, is_test=False):
         self.os_version = os_version
-        self.is_build = is_build
         self.is_test = is_test
-        self.extra_props = dict(extra_props)
 
     def gen_tree(self):
         non_phase_parts = ["pytorch", "macos", self.os_version, "py3"]
 
-        extra_name_list = [name for name, exist in self.extra_props.items() if exist]
-        full_job_name_list = non_phase_parts + extra_name_list + [
-            'build' if self.is_build else None,
-            'test' if self.is_test else None,
-        ]
+        phase_name = "test" if self.is_test else "build"
 
-        full_job_name = "_".join(list(filter(None, full_job_name_list)))
+        full_job_name = "_".join(non_phase_parts + [phase_name])
 
         test_build_dependency = "_".join(non_phase_parts + ["build"])
         extra_dependencies = [test_build_dependency] if self.is_test else []
@@ -29,23 +21,7 @@ class MacOsJob:
         return [{full_job_name: props_dict}]
 
 
-WORKFLOW_DATA = [
-    MacOsJob("10_15", is_build=True),
-    MacOsJob("10_13", is_build=True),
-    MacOsJob(
-        "10_13",
-        is_build=False,
-        is_test=True,
-    ),
-    MacOsJob(
-        "10_13",
-        is_build=True,
-        is_test=True,
-        extra_props=tuple({
-            "lite_interpreter": True
-        }.items()),
-    )
-]
+WORKFLOW_DATA = [MacOsJob("10_13"), MacOsJob("10_13", True)]
 
 
 def get_workflow_jobs():
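For context on the MacOsJob hunks above: the removed side splices optional `extra_props` names into the job name between the base parts and the build/test phase parts. A standalone sketch of that naming rule, with illustrative values only:

```python
# Sketch of the removed job-name rule; values here are examples, not CI data.
non_phase_parts = ["pytorch", "macos", "10_13", "py3"]
extra_props = {"lite_interpreter": True}
is_build, is_test = True, True

extra_name_list = [name for name, exist in extra_props.items() if exist]
full_job_name_list = non_phase_parts + extra_name_list + [
    "build" if is_build else None,
    "test" if is_test else None,
]
print("_".join(filter(None, full_job_name_list)))
# pytorch_macos_10_13_py3_lite_interpreter_build_test
```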
@@ -65,12 +65,6 @@ WORKFLOW_DATA = [
         ["custom", "build", "dynamic"]
     ),
 
-    MobileJob(
-        DOCKER_IMAGE_NDK,
-        [DOCKER_REQUIREMENT_NDK],
-        ["custom", "build", "static"]
-    ),
-
     # Use LLVM-DEV toolchain in android-ndk-r19c docker image
     # Most of this CI is already covered by "mobile-custom-build-dynamic" job
     MobileJob(
@@ -1,5 +1,5 @@
+import cimodel.data.simple.util.branch_filters
 import cimodel.lib.miniutils as miniutils
-from cimodel.data.simple.util.branch_filters import gen_filter_dict, RC_PATTERN, NON_PR_BRANCH_LIST
 from cimodel.data.simple.util.versions import CudaVersion
 
 
@@ -10,19 +10,13 @@ class WindowsJob:
         vscode_spec,
         cuda_version,
         force_on_cpu=False,
-        multi_gpu=False,
-        master_only=False,
-        nightly_only=False,
-        master_and_nightly=False
+        master_only_pred=lambda job: job.vscode_spec.year != 2019,
     ):
         self.test_index = test_index
         self.vscode_spec = vscode_spec
         self.cuda_version = cuda_version
         self.force_on_cpu = force_on_cpu
-        self.multi_gpu = multi_gpu
-        self.master_only = master_only
-        self.nightly_only = nightly_only
-        self.master_and_nightly = master_and_nightly
+        self.master_only_pred = master_only_pred
 
     def gen_tree(self):
 
@@ -31,10 +25,7 @@ class WindowsJob:
             base_phase if self.test_index is None else base_phase + str(self.test_index)
         )
 
-        key_parts = ["pytorch", "windows", base_phase]
-        if self.multi_gpu:
-            key_parts.append('multigpu')
-        key_name = "_".join(key_parts)
+        key_name = "_".join(["pytorch", "windows", base_phase])
 
         cpu_forcing_name_parts = ["on", "cpu"] if self.force_on_cpu else []
 
@@ -70,47 +61,35 @@ class WindowsJob:
 
         is_running_on_cuda = bool(self.cuda_version) and not self.force_on_cpu
 
-        if self.multi_gpu:
-            props_dict = {"requires": prerequisite_jobs}
-        else:
-            props_dict = {
-                "build_environment": build_environment_string,
-                "python_version": miniutils.quote("3.6"),
-                "vc_version": miniutils.quote(self.vscode_spec.dotted_version()),
-                "vc_year": miniutils.quote(str(self.vscode_spec.year)),
-                "vc_product": self.vscode_spec.get_product(),
-                "use_cuda": miniutils.quote(str(int(is_running_on_cuda))),
-                "requires": prerequisite_jobs,
-            }
+        props_dict = {
+            "build_environment": build_environment_string,
+            "python_version": miniutils.quote("3.6"),
+            "vc_version": miniutils.quote(self.vscode_spec.dotted_version()),
+            "vc_year": miniutils.quote(str(self.vscode_spec.year)),
+            "vc_product": self.vscode_spec.get_product(),
+            "use_cuda": miniutils.quote(str(int(is_running_on_cuda))),
+            "requires": prerequisite_jobs,
+        }
 
-        if self.master_only:
+        if self.master_only_pred(self):
             props_dict[
                 "filters"
-            ] = gen_filter_dict()
-        elif self.nightly_only:
-            props_dict[
-                "filters"
-            ] = gen_filter_dict(branches_list=["nightly"], tags_list=RC_PATTERN)
-        elif self.master_and_nightly:
-            props_dict[
-                "filters"
-            ] = gen_filter_dict(branches_list=NON_PR_BRANCH_LIST + ["nightly"], tags_list=RC_PATTERN)
+            ] = cimodel.data.simple.util.branch_filters.gen_filter_dict()
 
         name_parts = base_name_parts + cpu_forcing_name_parts + [numbered_phase]
 
-        if not self.multi_gpu:
-            if base_phase == "test":
-                test_name = "-".join(["pytorch", "windows", numbered_phase])
-                props_dict["test_name"] = test_name
+        if base_phase == "test":
+            test_name = "-".join(["pytorch", "windows", numbered_phase])
+            props_dict["test_name"] = test_name
 
         if is_running_on_cuda:
             props_dict["executor"] = "windows-with-nvidia-gpu"
 
         props_dict["cuda_version"] = (
             miniutils.quote(str(self.cuda_version))
             if self.cuda_version
             else "cpu"
         )
 
         props_dict["name"] = "_".join(name_parts)
 
@@ -129,7 +108,7 @@ class VcSpec:
         return [self.prefixed_year()] + self.version_elements
 
     def get_product(self):
-        return "BuildTools"
+        return "Community" if self.year == 2019 else "BuildTools"
 
     def dotted_version(self):
         return ".".join(self.version_elements)
@@ -140,23 +119,28 @@ class VcSpec:
     def render(self):
         return "_".join(self.get_elements())
 
+def FalsePred(_):
+    return False
+
+def TruePred(_):
+    return True
+
 _VC2019 = VcSpec(2019)
 
 WORKFLOW_DATA = [
     # VS2019 CUDA-10.1
-    WindowsJob(None, _VC2019, CudaVersion(10, 1), master_only=True),
-    WindowsJob(1, _VC2019, CudaVersion(10, 1), master_only=True),
-    WindowsJob(2, _VC2019, CudaVersion(10, 1), master_only=True),
+    WindowsJob(None, _VC2019, CudaVersion(10, 1)),
+    WindowsJob(1, _VC2019, CudaVersion(10, 1)),
+    WindowsJob(2, _VC2019, CudaVersion(10, 1)),
     # VS2019 CUDA-11.1
     WindowsJob(None, _VC2019, CudaVersion(11, 1)),
-    WindowsJob(1, _VC2019, CudaVersion(11, 1), master_only=True),
-    WindowsJob(2, _VC2019, CudaVersion(11, 1), master_only=True),
-    WindowsJob('_azure_multi_gpu', _VC2019, CudaVersion(11, 1), multi_gpu=True, nightly_only=True),
+    WindowsJob(1, _VC2019, CudaVersion(11, 1), master_only_pred=TruePred),
+    WindowsJob(2, _VC2019, CudaVersion(11, 1), master_only_pred=TruePred),
    # VS2019 CPU-only
     WindowsJob(None, _VC2019, None),
-    WindowsJob(1, _VC2019, None),
-    WindowsJob(2, _VC2019, None),
-    WindowsJob(1, _VC2019, CudaVersion(10, 1), force_on_cpu=True, master_only=True),
+    WindowsJob(1, _VC2019, None, master_only_pred=TruePred),
+    WindowsJob(2, _VC2019, None, master_only_pred=TruePred),
+    WindowsJob(1, _VC2019, CudaVersion(10, 1), force_on_cpu=True, master_only_pred=TruePred),
 ]
 
 
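To summarize the WindowsJob hunks above: the left-hand side replaces the `master_only_pred` callable with plain boolean flags, each selecting a different branch filter. A minimal sketch of that selection logic, with `gen_filter_dict` standing in for the cimodel helper rather than being imported:

```python
def pick_filters(job, gen_filter_dict, NON_PR_BRANCH_LIST, RC_PATTERN):
    # Mirrors the if/elif chain in the hunk above (illustrative only).
    if job.master_only:
        return gen_filter_dict()
    if job.nightly_only:
        return gen_filter_dict(branches_list=["nightly"], tags_list=RC_PATTERN)
    if job.master_and_nightly:
        return gen_filter_dict(branches_list=NON_PR_BRANCH_LIST + ["nightly"],
                               tags_list=RC_PATTERN)
    return None  # no filter: job runs on every branch
```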
.circleci/config.yml: 3209 lines changed (diff suppressed because it is too large).
@@ -12,20 +12,8 @@ each image as the `BUILD_ENVIRONMENT` environment variable.
 
 See `build.sh` for valid build environments (it's the giant switch).
 
-Docker builds are now defined with `.circleci/cimodel/data/simple/docker_definitions.py`
-
 ## Contents
 
 * `build.sh` -- dispatch script to launch all builds
 * `common` -- scripts used to execute individual Docker build stages
 * `ubuntu-cuda` -- Dockerfile for Ubuntu image with CUDA support for nvidia-docker
-
-## Usage
-
-```bash
-# Build a specific image
-./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest
-
-# Set flags (see build.sh) and build image
-sudo bash -c 'BREAKPAD=1 ./build.sh pytorch-linux-bionic-py3.8-gcc9 -t myimage:latest
-```
@@ -20,8 +20,10 @@ buildscript {
     }
 
     dependencies {
-        classpath 'com.android.tools.build:gradle:4.1.2'
-        classpath 'com.vanniktech:gradle-maven-publish-plugin:0.14.2'
+        classpath 'com.android.tools.build:gradle:3.3.2'
+        classpath "com.jfrog.bintray.gradle:gradle-bintray-plugin:1.8.0"
+        classpath "com.github.dcendents:android-maven-gradle-plugin:2.1"
+        classpath "org.jfrog.buildinfo:build-info-extractor-gradle:4.9.8"
     }
 }
 
@@ -88,7 +88,6 @@ case "$image" in
     DB=yes
     VISION=yes
     KATEX=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-xenial-py3.6-gcc7.2)
     ANACONDA_PYTHON_VERSION=3.6
@@ -101,7 +100,24 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
+    ;;
+  pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc5.4)
+    CUDA_VERSION=9.2
+    CUDNN_VERSION=7
+    ANACONDA_PYTHON_VERSION=3.6
+    GCC_VERSION=5
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
+    ;;
+  pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7)
+    CUDA_VERSION=9.2
+    CUDNN_VERSION=7
+    ANACONDA_PYTHON_VERSION=3.6
+    GCC_VERSION=7
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
     ;;
   pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7)
     CUDA_VERSION=10.0
@@ -111,7 +127,6 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7)
     CUDA_VERSION=10.1
@@ -122,7 +137,6 @@ case "$image" in
     DB=yes
     VISION=yes
     KATEX=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7)
     CUDA_VERSION=10.2
@@ -133,7 +147,16 @@ case "$image" in
     DB=yes
     VISION=yes
     KATEX=yes
-    BREAKPAD=yes
+    ;;
+  pytorch-linux-xenial-cuda11.0-cudnn8-py3-gcc7)
+    CUDA_VERSION=11.0
+    CUDNN_VERSION=8
+    ANACONDA_PYTHON_VERSION=3.6
+    GCC_VERSION=7
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
+    KATEX=yes
     ;;
   pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7)
     CUDA_VERSION=11.1
@@ -144,18 +167,6 @@ case "$image" in
     DB=yes
     VISION=yes
     KATEX=yes
-    BREAKPAD=yes
-    ;;
-  pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7)
-    CUDA_VERSION=11.3.0 # Deviating from major.minor to conform to nvidia's Docker image names
-    CUDNN_VERSION=8
-    ANACONDA_PYTHON_VERSION=3.6
-    GCC_VERSION=7
-    PROTOBUF=yes
-    DB=yes
-    VISION=yes
-    KATEX=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-xenial-py3-clang5-asan)
     ANACONDA_PYTHON_VERSION=3.6
@@ -163,7 +174,6 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-xenial-py3-clang7-onnx)
     ANACONDA_PYTHON_VERSION=3.6
@@ -171,7 +181,6 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-xenial-py3-clang5-android-ndk-r19c)
     ANACONDA_PYTHON_VERSION=3.6
@@ -180,7 +189,7 @@ case "$image" in
     PROTOBUF=yes
     ANDROID=yes
     ANDROID_NDK_VERSION=r19c
-    GRADLE_VERSION=6.8.3
+    GRADLE_VERSION=4.10.3
     CMAKE_VERSION=3.7.0
     NINJA_VERSION=1.9.0
     ;;
@@ -190,7 +199,6 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-bionic-py3.6-clang9)
     ANACONDA_PYTHON_VERSION=3.6
@@ -198,8 +206,7 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
-    VULKAN_SDK_VERSION=1.2.162.1
+    VULKAN_SDK_VERSION=1.2.148.0
     SWIFTSHADER=yes
     ;;
   pytorch-linux-bionic-py3.8-gcc9)
@@ -208,8 +215,6 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-bionic-cuda10.2-cudnn7-py3.6-clang9)
     CUDA_VERSION=10.2
@@ -219,7 +224,6 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-bionic-cuda10.2-cudnn7-py3.8-gcc9)
     CUDA_VERSION=10.2
@@ -229,17 +233,6 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
-    ;;
-  pytorch-linux-bionic-cuda10.2-cudnn7-py3.9-gcc7)
-    CUDA_VERSION=10.2
-    CUDNN_VERSION=7
-    ANACONDA_PYTHON_VERSION=3.9
-    GCC_VERSION=7
-    PROTOBUF=yes
-    DB=yes
-    VISION=yes
-    BREAKPAD=yes
     ;;
   pytorch-linux-bionic-cuda11.0-cudnn8-py3.6-gcc9)
     CUDA_VERSION=11.0
@@ -249,42 +242,57 @@ case "$image" in
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
+    KATEX=yes
+    ;;
+  pytorch-linux-bionic-cuda11.0-cudnn8-py3.8-gcc9)
+    CUDA_VERSION=11.0
+    CUDNN_VERSION=8
+    ANACONDA_PYTHON_VERSION=3.8
+    GCC_VERSION=9
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
+    KATEX=yes
+    ;;
+  pytorch-linux-bionic-cuda11.1-cudnn8-py3.6-gcc9)
+    CUDA_VERSION=11.1
+    CUDNN_VERSION=8
+    ANACONDA_PYTHON_VERSION=3.6
+    GCC_VERSION=9
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
+    KATEX=yes
+    ;;
+  pytorch-linux-bionic-cuda11.1-cudnn8-py3.8-gcc9)
+    CUDA_VERSION=11.1
+    CUDNN_VERSION=8
+    ANACONDA_PYTHON_VERSION=3.8
+    GCC_VERSION=9
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
+    KATEX=yes
+    ;;
+  pytorch-linux-bionic-rocm3.9-py3.6)
+    ANACONDA_PYTHON_VERSION=3.6
+    PROTOBUF=yes
+    DB=yes
+    VISION=yes
     ROCM_VERSION=3.9
     ;;
-  pytorch-linux-bionic-rocm4.0.1-py3.6)
+  pytorch-linux-bionic-rocm3.10-py3.6)
     ANACONDA_PYTHON_VERSION=3.6
-    GCC_VERSION=9
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
-    ROCM_VERSION=4.0.1
-    ;;
-  pytorch-linux-bionic-rocm4.1-py3.6)
-    ANACONDA_PYTHON_VERSION=3.6
-    GCC_VERSION=9
-    PROTOBUF=yes
-    DB=yes
-    VISION=yes
-    BREAKPAD=yes
-    ROCM_VERSION=4.1
-    ;;
-  pytorch-linux-bionic-rocm4.2-py3.6)
-    ANACONDA_PYTHON_VERSION=3.6
-    GCC_VERSION=9
-    PROTOBUF=yes
-    DB=yes
-    VISION=yes
-    BREAKPAD=yes
-    ROCM_VERSION=4.2
+    ROCM_VERSION=3.10
     ;;
   *)
     # Catch-all for builds that are not hardcoded.
     PROTOBUF=yes
     DB=yes
     VISION=yes
-    BREAKPAD=yes
     echo "image '$image' did not match an existing build configuration"
     if [[ "$image" == *py* ]]; then
       extract_version_from_image_name py ANACONDA_PYTHON_VERSION
@@ -320,7 +328,7 @@ if [ -n "${JENKINS:-}" ]; then
   JENKINS_GID=$(id -g jenkins)
 fi
 
-tmp_tag="tmp-$(cat /dev/urandom | tr -dc 'a-z' | head -c 32)"
+tmp_tag="tmp-$(cat /dev/urandom | tr -dc 'a-z' | fold -w 32 | head -n 1)"
 
 # Build image
 # TODO: build-arg THRIFT is not turned on for any image, remove it once we confirm
@@ -348,7 +356,6 @@ docker build \
        --build-arg "GCC_VERSION=${GCC_VERSION}" \
        --build-arg "CUDA_VERSION=${CUDA_VERSION}" \
        --build-arg "CUDNN_VERSION=${CUDNN_VERSION}" \
-       --build-arg "BREAKPAD=${BREAKPAD}" \
        --build-arg "ANDROID=${ANDROID}" \
        --build-arg "ANDROID_NDK=${ANDROID_NDK_VERSION}" \
        --build-arg "GRADLE_VERSION=${GRADLE_VERSION}" \
@@ -46,7 +46,4 @@ trap "docker logout ${registry}" EXIT
 docker push "${image}:${tag}"
 
 docker save -o "${IMAGE_NAME}:${tag}.tar" "${image}:${tag}"
-if [ -z "${DOCKER_SKIP_S3_UPLOAD:-}" ]; then
-  aws s3 cp "${IMAGE_NAME}:${tag}.tar" "s3://ossci-linux-build/pytorch/base/${IMAGE_NAME}:${tag}.tar" --acl public-read
-fi
+aws s3 cp "${IMAGE_NAME}:${tag}.tar" "s3://ossci-linux-build/pytorch/base/${IMAGE_NAME}:${tag}.tar" --acl public-read
@@ -64,7 +64,6 @@ ENV PATH /opt/rocm/hcc/bin:$PATH
 ENV PATH /opt/rocm/hip/bin:$PATH
 ENV PATH /opt/rocm/opencl/bin:$PATH
 ENV PATH /opt/rocm/llvm/bin:$PATH
-ENV MAGMA_HOME /opt/rocm/magma
 ENV LANG en_US.utf8
 ENV LC_ALL en_US.utf8
 
@@ -99,7 +99,7 @@ echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES
 chown -R jenkins /var/lib/jenkins/gradledeps
 chgrp -R jenkins /var/lib/jenkins/gradledeps
 
-sudo -H -u jenkins $GRADLE_HOME/bin/gradle -Pandroid.useAndroidX=true -p /var/lib/jenkins/gradledeps -g /var/lib/jenkins/.gradle --refresh-dependencies --debug --stacktrace assemble
+sudo -H -u jenkins $GRADLE_HOME/bin/gradle -p /var/lib/jenkins/gradledeps -g /var/lib/jenkins/.gradle --refresh-dependencies --debug --stacktrace assemble
 
 chown -R jenkins /var/lib/jenkins/.gradle
 chgrp -R jenkins /var/lib/jenkins/.gradle
@@ -77,7 +77,6 @@ install_centos() {
       glog-devel \
       hiredis-devel \
       libstdc++-devel \
-      libsndfile-devel \
       make \
       opencv-devel \
       sudo \
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-git clone https://github.com/malfet/breakpad.git -b pytorch/release-1.9
-pushd breakpad
-
-git clone https://chromium.googlesource.com/linux-syscall-support src/third_party/lss
-pushd src/third_party/lss
-# same as with breakpad, there are no real releases for this repo so use a
-# commit as the pin
-git checkout e1e7b0ad8ee99a875b272c8e33e308472e897660
-popd
-
-./configure
-make
-make install
-popd
-rm -rf breakpad
@@ -71,22 +71,18 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
   # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
   # DO NOT install cmake here as it would install a version newer than 3.5, but
   # we want to pin to version 3.5.
-  SCIPY_VERSION=1.1.0
-  if [ "$ANACONDA_PYTHON_VERSION" = "3.9" ]; then
+  if [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then
     # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
-    conda_install numpy=1.19.2 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0 -c conda-forge
-    SCIPY_VERSION=1.6.0
-  elif [ "$ANACONDA_PYTHON_VERSION" = "3.8" ]; then
-    # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
-    conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0
+    conda_install numpy=1.18.5 pyyaml mkl mkl-include setuptools cffi future six llvmdev=8.0.0
   elif [ "$ANACONDA_PYTHON_VERSION" = "3.7" ]; then
     # DO NOT install dataclasses if installing python-3.7, since its part of python-3.7 core packages
-    conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six typing_extensions
+    conda_install numpy=1.18.5 pyyaml mkl mkl-include setuptools cffi future six typing_extensions
   else
-    conda_install numpy=1.18.5 astunparse pyyaml mkl mkl-include setuptools cffi future six dataclasses typing_extensions
+    conda_install numpy=1.18.5 pyyaml mkl mkl-include setuptools cffi future six dataclasses typing_extensions
   fi
-  if [[ "$CUDA_VERSION" == 10.0* ]]; then
+  if [[ "$CUDA_VERSION" == 9.2* ]]; then
+    conda_install magma-cuda92 -c pytorch
+  elif [[ "$CUDA_VERSION" == 10.0* ]]; then
     conda_install magma-cuda100 -c pytorch
   elif [[ "$CUDA_VERSION" == 10.1* ]]; then
     conda_install magma-cuda101 -c pytorch
@@ -96,8 +92,8 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
     conda_install magma-cuda110 -c pytorch
   elif [[ "$CUDA_VERSION" == 11.1* ]]; then
     conda_install magma-cuda111 -c pytorch
-  elif [[ "$CUDA_VERSION" == 11.3* ]]; then
-    conda_install magma-cuda113 -c pytorch
+  elif [[ "$CUDA_VERSION" == 11.2* ]]; then
+    conda_install magma-cuda112 -c pytorch
   fi
 
   # TODO: This isn't working atm
@@ -107,26 +103,20 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
   # TODO: Why is scipy pinned
   # Pin MyPy version because new errors are likely to appear with each release
   # Pin hypothesis to avoid flakiness: https://github.com/pytorch/pytorch/issues/31136
-  # Pin coverage so we can use COVERAGE_RCFILE
   as_jenkins pip install --progress-bar off pytest \
-    scipy==$SCIPY_VERSION \
+    scipy==1.1.0 \
     scikit-image \
+    librosa>=0.6.2 \
     psutil \
+    numba \
+    llvmlite \
     unittest-xml-reporting \
     boto3==1.16.34 \
-    coverage==5.5 \
+    coverage \
     hypothesis==4.53.2 \
-    mypy==0.812 \
+    mypy==0.770 \
     tb-nightly
 
-  # Install numba only on python-3.8 or below
-  # For numba issue see https://github.com/pytorch/pytorch/issues/51511
-  if [[ $(python -c "import sys; print(int(sys.version_info < (3, 9)))") == "1" ]]; then
-    as_jenkins pip install --progress-bar off numba librosa>=0.6.2
-  else
-    as_jenkins pip install --progress-bar off numba==0.49.0 librosa>=0.6.2
-  fi
-
   # Update scikit-learn to a python-3.8 compatible version
   if [[ $(python -c "import sys; print(int(sys.version_info >= (3, 8)))") == "1" ]]; then
     as_jenkins pip install --progress-bar off -U scikit-learn
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-OPENSSL=openssl-1.1.1k
-
-wget -q -O "${OPENSSL}.tar.gz" "https://www.openssl.org/source/${OPENSSL}.tar.gz"
-tar xf "${OPENSSL}.tar.gz"
-cd "${OPENSSL}"
-./config --prefix=/opt/openssl -d '-Wl,--enable-new-dtags,-rpath,$(LIBRPATH)'
-# NOTE: opensl errors out when built with the -j option
-make install_sw
-cd ..
-rm -rf "${OPENSSL}"
@@ -4,27 +4,20 @@ set -ex
 
 install_magma() {
   # "install" hipMAGMA into /opt/rocm/magma by copying after build
-  git clone https://bitbucket.org/icl/magma.git
+  git clone https://bitbucket.org/icl/magma.git -b hipMAGMA
   pushd magma
-  git checkout 878b1ce02e9cfe4a829be22c8f911e9c0b6bd88f
-  cp make.inc-examples/make.inc.hip-gcc-mkl make.inc
+  cp make.inc-examples/make.inc.hip-mkl-gcc make.inc
   echo 'LIBDIR += -L$(MKLROOT)/lib' >> make.inc
   echo 'LIB += -Wl,--enable-new-dtags -Wl,--rpath,/opt/rocm/lib -Wl,--rpath,$(MKLROOT)/lib -Wl,--rpath,/opt/rocm/magma/lib' >> make.inc
-  echo 'DEVCCFLAGS += --amdgpu-target=gfx803 --amdgpu-target=gfx900 --amdgpu-target=gfx906 --amdgpu-target=gfx908 --gpu-max-threads-per-block=256' >> make.inc
-  # hipcc with openmp flag may cause isnan() on __device__ not to be found; depending on context, compiler may attempt to match with host definition
-  sed -i 's/^FOPENMP/#FOPENMP/g' make.inc
+  echo 'DEVCCFLAGS += --amdgpu-target=gfx803 --amdgpu-target=gfx900 --amdgpu-target=gfx906 --amdgpu-target=gfx908' >> make.inc
   export PATH="${PATH}:/opt/rocm/bin"
   make -f make.gen.hipMAGMA -j $(nproc)
-  LANG=C.UTF-8 make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda
+  make lib/libmagma.so -j $(nproc) MKLROOT=/opt/conda
   make testing/testing_dgemm -j $(nproc) MKLROOT=/opt/conda
   popd
   mv magma /opt/rocm
 }
 
-ver() {
-  printf "%3d%03d%03d%03d" $(echo "$1" | tr '.' ' ');
-}
-
 install_ubuntu() {
   apt-get update
   if [[ $UBUNTU_VERSION == 18.04 ]]; then
@@ -38,14 +31,9 @@ install_ubuntu() {
   apt-get install -y libc++1
   apt-get install -y libc++abi1
 
-  ROCM_REPO="ubuntu"
-  if [[ $(ver $ROCM_VERSION) -lt $(ver 4.2) ]]; then
-    ROCM_REPO="xenial"
-  fi
-
   # Add rocm repository
   wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add -
-  echo "deb [arch=amd64] http://repo.radeon.com/rocm/apt/${ROCM_VERSION} ${ROCM_REPO} main" > /etc/apt/sources.list.d/rocm.list
+  echo "deb [arch=amd64] http://repo.radeon.com/rocm/apt/${ROCM_VERSION} xenial main" > /etc/apt/sources.list.d/rocm.list
   apt-get update --allow-insecure-repositories
 
   DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
@@ -8,17 +8,16 @@ retry () {
   $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
 }
 
+_https_amazon_aws=https://ossci-android.s3.amazonaws.com
+
 _vulkansdk_dir=/var/lib/jenkins/vulkansdk
+mkdir -p $_vulkansdk_dir
 _tmp_vulkansdk_targz=/tmp/vulkansdk.tar.gz
+curl --silent --show-error --location --fail --retry 3 \
+  --output "$_tmp_vulkansdk_targz" "$_https_amazon_aws/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz"
 
-curl \
-  --silent \
-  --show-error \
-  --location \
-  --fail \
-  --retry 3 \
-  --output "${_tmp_vulkansdk_targz}" "https://ossci-android.s3.amazonaws.com/vulkansdk-linux-x86_64-${VULKAN_SDK_VERSION}.tar.gz"
+tar -C "$_vulkansdk_dir" -xzf "$_tmp_vulkansdk_targz" --strip-components 1
 
-mkdir -p "${_vulkansdk_dir}"
-tar -C "${_vulkansdk_dir}" -xzf "${_tmp_vulkansdk_targz}" --strip-components 1
-rm -rf "${_tmp_vulkansdk_targz}"
+export VULKAN_SDK="$_vulkansdk_dir/"
+rm "$_tmp_vulkansdk_targz"
@@ -61,10 +61,6 @@ RUN if [ -n "${VISION}" ]; then bash ./install_vision.sh; fi
 RUN rm install_vision.sh
 ENV INSTALLED_VISION ${VISION}
 
-ADD ./common/install_openssl.sh install_openssl.sh
-ENV OPENSSL_ROOT_DIR /opt/openssl
-RUN bash ./install_openssl.sh
-
 # Install ccache/sccache (do this last, so we get priority in PATH)
 ADD ./common/install_cache.sh install_cache.sh
 ENV PATH /opt/cache/bin:$PATH
@@ -27,11 +27,6 @@ ARG ANACONDA_PYTHON_VERSION
 ADD ./common/install_conda.sh install_conda.sh
 RUN bash ./install_conda.sh && rm install_conda.sh
 
-# Install gcc
-ARG GCC_VERSION
-ADD ./common/install_gcc.sh install_gcc.sh
-RUN bash ./install_gcc.sh && rm install_gcc.sh
-
 # (optional) Install protobuf for ONNX
 ARG PROTOBUF
 ADD ./common/install_protobuf.sh install_protobuf.sh
@@ -82,13 +82,6 @@ RUN rm AndroidManifest.xml
 RUN rm build.gradle
 ENV INSTALLED_ANDROID ${ANDROID}
 
-# (optional) Install breakpad
-ARG BREAKPAD
-ADD ./common/install_breakpad.sh install_breakpad.sh
-RUN if [ -n "${BREAKPAD}" ]; then bash ./install_breakpad.sh; fi
-RUN rm install_breakpad.sh
-ENV INSTALLED_BREAKPAD ${BREAKPAD}
-
 # (optional) Install Vulkan SDK
 ARG VULKAN_SDK_VERSION
 ADD ./common/install_vulkan_sdk.sh install_vulkan_sdk.sh
@@ -113,10 +106,6 @@ ADD ./common/install_ninja.sh install_ninja.sh
 RUN if [ -n "${NINJA_VERSION}" ]; then bash ./install_ninja.sh; fi
 RUN rm install_ninja.sh
 
-ADD ./common/install_openssl.sh install_openssl.sh
-RUN bash ./install_openssl.sh
-ENV OPENSSL_ROOT_DIR /opt/openssl
-
 # Install ccache/sccache (do this last, so we get priority in PATH)
 ADD ./common/install_cache.sh install_cache.sh
 ENV PATH /opt/cache/bin:$PATH
@@ -1,10 +1,10 @@
-FROM ubuntu:18.04
+FROM ubuntu:16.04
 
-RUN apt-get update && apt-get install -y python3-pip git && rm -rf /var/lib/apt/lists/* /var/log/dpkg.log
+RUN apt-get update && apt-get install -y python-pip git && rm -rf /var/lib/apt/lists/* /var/log/dpkg.log
 
 ADD requirements.txt /requirements.txt
 
-RUN pip3 install -r /requirements.txt
+RUN pip install -r /requirements.txt
 
 ADD gc.py /usr/bin/gc.py
 
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 from collections import namedtuple
 
@@ -1,11 +1,11 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 import argparse
-import boto3
 import datetime
+import boto3
 import pytz
-import re
 import sys
+import re
 
 
 def save_to_s3(project, data):
@@ -148,12 +148,9 @@ def chunks(chunkable, n):
     """ Yield successive n-sized chunks from l.
     """
     for i in range(0, len(chunkable), n):
-        yield chunkable[i: i + n]
+        yield chunkable[i : i + n]
 
-
 SHA_PATTERN = re.compile(r'^[0-9a-f]{40}$')
 
-
 def looks_like_git_sha(tag):
     """Returns a boolean to check if a tag looks like a git sha
-
|
|||||||
"""
|
"""
|
||||||
return re.match(SHA_PATTERN, tag) is not None
|
return re.match(SHA_PATTERN, tag) is not None
|
||||||
|
|
||||||
|
|
||||||
stable_window_tags = []
|
stable_window_tags = []
|
||||||
for repo in repos(client):
|
for repo in repos(client):
|
||||||
repositoryName = repo["repositoryName"]
|
repositoryName = repo["repositoryName"]
|
||||||
|
|||||||
@ -80,52 +80,6 @@ class Header(object):
|
|||||||
for line in filter(None, lines):
|
for line in filter(None, lines):
|
||||||
output_filehandle.write(line + "\n")
|
output_filehandle.write(line + "\n")
|
||||||
|
|
||||||
def filter_master_only_jobs(items):
|
|
||||||
def _for_all_items(items, functor) -> None:
|
|
||||||
-        if isinstance(items, list):
-            for item in items:
-                _for_all_items(item, functor)
-        if isinstance(items, dict) and len(items) == 1:
-            item_type, item = next(iter(items.items()))
-            functor(item_type, item)
-
-    def _is_master_item(item):
-        filters = item.get('filters', None)
-        branches = filters.get('branches', None) if filters is not None else None
-        branches_only = branches.get('only', None) if branches is not None else None
-        return 'master' in branches_only if branches_only is not None else False
-
-    master_deps = set()
-
-    def _save_requires_if_master(item_type, item):
-        requires = item.get('requires', None)
-        item_name = item.get("name", None)
-        if not isinstance(requires, list):
-            return
-        if _is_master_item(item) or item_name in master_deps:
-            master_deps.update([n.strip('"') for n in requires])
-
-    def _do_filtering(items):
-        if isinstance(items, list):
-            rc = [_do_filtering(item) for item in items]
-            return [item for item in rc if len(item if item is not None else []) > 0]
-        assert isinstance(items, dict) and len(items) == 1
-        item_type, item = next(iter(items.items()))
-        item_name = item.get("name", None)
-        item_name = item_name.strip('"') if item_name is not None else None
-        if not _is_master_item(item) and item_name not in master_deps:
-            return None
-        if 'filters' in item:
-            item = item.copy()
-            item.pop('filters')
-        return {item_type: item}
-
-    # Scan dependencies twice to pick up nested required jobs,
-    # i.e. jobs depending on jobs that a master-only job depends on
-    _for_all_items(items, _save_requires_if_master)
-    _for_all_items(items, _save_requires_if_master)
-    return _do_filtering(items)


 def gen_build_workflows_tree():
     build_workflows_functions = [
@@ -151,8 +105,7 @@ def gen_build_workflows_tree():
         binary_build_definitions.get_nightly_tests,
         binary_build_definitions.get_nightly_uploads,
     ]
-    build_jobs = [f() for f in build_workflows_functions]
-    master_build_jobs = filter_master_only_jobs(build_jobs)
     return {
         "workflows": {
             "binary_builds": {
@@ -161,11 +114,7 @@ def gen_build_workflows_tree():
             },
             "build": {
                 "when": r"<< pipeline.parameters.run_build >>",
-                "jobs": build_jobs,
+                "jobs": [f() for f in build_workflows_functions]
             },
-            "master_build": {
-                "when": r"<< pipeline.parameters.run_master_build >>",
-                "jobs": master_build_jobs,
-            },
         }
     }
@@ -190,7 +139,6 @@ YAML_SOURCES = [
     File("job-specs/docker_jobs.yml"),
     Header("Workflows"),
     Treegen(gen_build_workflows_tree, 0),
-    File("workflows/workflows-scheduled-ci.yml"),
     File("workflows/workflows-ecr-gc.yml"),
     File("workflows/workflows-promote.yml"),
 ]
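The hunks above remove `filter_master_only_jobs`, which prunes a CircleCI workflow tree down to jobs that either carry a `filters.branches.only: master` clause or are (transitively) required by such a job; the `filters` key is stripped from survivors so they run unconditionally in the master-only workflow. A minimal sketch of how it would be driven, using invented job names (illustrative only, not from the source):

    # Hypothetical workflow tree; the job names are invented for illustration.
    workflow = [
        {"docker_build": {"name": "docker_image"}},
        {"pytorch_linux_build": {
            "name": "master_only_build",
            "requires": ["docker_image"],
            "filters": {"branches": {"only": "master"}},
        }},
    ]

    master_jobs = filter_master_only_jobs(workflow)
    # The two passes over the 'requires' edges keep "docker_image" as well,
    # because the master-only job depends on it.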
@@ -1,5 +0,0 @@
-cd $PSScriptRoot;
-$NewFile = New-TemporaryFile;
-python generate_config_yml.py > $NewFile.name
-(Get-Content $NewFile.name -Raw).TrimEnd().Replace("`r`n","`n") | Set-Content config.yml -Force
-Remove-Item $NewFile.name

@@ -1,17 +1,8 @@
-#!/bin/bash -e
+#!/bin/bash -xe

 # Allows this script to be invoked from any directory:
-cd "$(dirname "$0")"
+cd $(dirname "$0")

-UNCOMMIT_CHANGE=$(git status -s | grep " config.yml" | wc -l | xargs)
-if [[ $UNCOMMIT_CHANGE != 0 ]]; then
-  OLD_FILE=$(mktemp)
-  cp config.yml "$OLD_FILE"
-  echo "Uncommitted change detected in .circleci/config.yml"
-  echo "It has been backed up to $OLD_FILE"
-fi

 NEW_FILE=$(mktemp)
-./generate_config_yml.py > "$NEW_FILE"
-cp "$NEW_FILE" config.yml
-echo "New config generated in .circleci/config.yml"
+./generate_config_yml.py > $NEW_FILE
+cp $NEW_FILE config.yml
@@ -63,7 +63,6 @@ popd

 # Clone the Builder master repo
 retry git clone -q https://github.com/pytorch/builder.git "$BUILDER_ROOT"
 pushd "$BUILDER_ROOT"
-git checkout release/1.9
 echo "Using builder from "
 git --no-pager log --max-count 1
 popd
@@ -15,7 +15,7 @@ export PATH="~/anaconda/bin:${PATH}"
 source ~/anaconda/bin/activate

 # Install dependencies
-conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests typing_extensions --yes
+conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests --yes
 conda install -c conda-forge valgrind --yes
 export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
@@ -24,6 +24,6 @@ rm cert.txt
 if ! [ -x "$(command -v xcodebuild)" ]; then
     echo 'Error: xcodebuild is not installed.'
     exit 1
 fi
 PROFILE=PyTorch_CI_2021
 ruby ${PROJ_ROOT}/scripts/xcode_build.rb -i ${PROJ_ROOT}/build_ios/install -x ${PROJ_ROOT}/ios/TestApp/TestApp.xcodeproj -p ${IOS_PLATFORM} -c ${PROFILE} -t ${IOS_DEV_TEAM_ID}
@@ -9,6 +9,10 @@ python_nodot="\$(echo $DESIRED_PYTHON | tr -d m.u)"

 # Set up Python
 if [[ "$PACKAGE_TYPE" == conda ]]; then
+  # There was a bug that was introduced in conda-package-handling >= 1.6.1 that makes archives
+  # above a certain size fail out when attempting to extract
+  # see: https://github.com/conda/conda-package-handling/issues/71
+  conda install -y conda-package-handling=1.6.0
   retry conda create -qyn testenv python="$DESIRED_PYTHON"
   source activate testenv >/dev/null
 elif [[ "$PACKAGE_TYPE" != libtorch ]]; then
@@ -34,10 +38,6 @@ if [[ "$DESIRED_CUDA" == "cu112" ]]; then
   EXTRA_CONDA_FLAGS="-c=conda-forge"
 fi

-# Move debug wheels out of the package dir so they don't get installed
-mkdir -p /tmp/debug_final_pkgs
-mv /final_pkgs/debug-*.zip /tmp/debug_final_pkgs || echo "no debug packages to move"
-
 # Install the package
 # These network calls should not have 'retry's because they are installing
 # locally and aren't actually network calls
@@ -68,24 +68,12 @@ if [[ -z "$DOCKER_IMAGE" ]]; then
   fi
 fi

-USE_GOLD_LINKER="OFF"
-# GOLD linker can not be used if CUPTI is statically linked into PyTorch, see https://github.com/pytorch/pytorch/issues/57744
-if [[ ${DESIRED_CUDA} == "cpu" ]]; then
-  USE_GOLD_LINKER="ON"
-fi
-
-USE_WHOLE_CUDNN="OFF"
-# Link whole cuDNN for CUDA-11.1 to include fp16 fast kernels
-if [[ "$(uname)" == "Linux" && "${DESIRED_CUDA}" == "cu111" ]]; then
-  USE_WHOLE_CUDNN="ON"
-fi
-
 # Default to nightly, since that's where this normally uploads to
 PIP_UPLOAD_FOLDER='nightly/'
 # We put this here so that OVERRIDE_PACKAGE_VERSION below can read from it
 export DATE="$(date -u +%Y%m%d)"
 #TODO: We should be pulling semver version from the base version.txt
-BASE_BUILD_VERSION="1.9.0.dev$DATE"
+BASE_BUILD_VERSION="1.8.0.dev$DATE"
 # Change BASE_BUILD_VERSION to git tag when on a git tag
 # Use 'git -C' to make doubly sure we're in the correct directory for checking
 # the git tag
@@ -148,7 +136,7 @@ if [[ "${BUILD_FOR_SYSTEM:-}" == "windows" ]]; then
 fi

 export DATE="$DATE"
-export NIGHTLIES_DATE_PREAMBLE=1.9.0.dev
+export NIGHTLIES_DATE_PREAMBLE=1.8.0.dev
 export PYTORCH_BUILD_VERSION="$PYTORCH_BUILD_VERSION"
 export PYTORCH_BUILD_NUMBER="$PYTORCH_BUILD_NUMBER"
 export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
@@ -180,10 +168,6 @@ export CIRCLE_SHA1="$CIRCLE_SHA1"
 export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-}"
 export CIRCLE_BRANCH="$CIRCLE_BRANCH"
 export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
-
-export USE_GOLD_LINKER="${USE_GOLD_LINKER}"
-export USE_GLOO_WITH_OPENSSL="ON"
-export USE_WHOLE_CUDNN="${USE_WHOLE_CUDNN}"
 # =================== The above code will be executed inside Docker container ===================
 EOL
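The nightly version strings changed here are date-stamped: the script exports the UTC date once and splices it into the base version. A quick sketch of the scheme (an illustration, not source code):

    # Sketch: composing a nightly version like BASE_BUILD_VERSION above.
    from datetime import datetime, timezone

    date = datetime.now(timezone.utc).strftime("%Y%m%d")  # e.g. "20210401"
    base_build_version = "1.8.0.dev" + date               # "1.8.0.dev20210401"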
@@ -31,10 +31,6 @@ if [[ "$CIRCLECI" == 'true' && -d "C:\\ProgramData\\Microsoft\\VisualStudio\\Pac
     mv _Instances "C:\\ProgramData\\Microsoft\\VisualStudio\\Packages"
 fi

-if [[ "$CIRCLECI" == 'true' && -d "C:\\Microsoft" ]]; then
-    rm -rf "C:\\Microsoft\\Android*"
-fi
-
 echo "Free space on filesystem before build:"
 df -h
@@ -10,7 +10,7 @@ export ANDROID_HOME=/opt/android/sdk

 # Must be in sync with GRADLE_VERSION in docker image for android
 # https://github.com/pietern/pytorch-dockerfiles/blob/master/build.sh#L155
-export GRADLE_VERSION=6.8.3
+export GRADLE_VERSION=4.10.3
 export GRADLE_HOME=/opt/gradle/gradle-$GRADLE_VERSION
 export GRADLE_PATH=$GRADLE_HOME/bin/gradle
@@ -5,7 +5,7 @@ set -eu -o pipefail
 export ANDROID_NDK_HOME=/opt/ndk
 export ANDROID_HOME=/opt/android/sdk

-export GRADLE_VERSION=6.8.3
+export GRADLE_VERSION=4.10.3
 export GRADLE_HOME=/opt/gradle/gradle-$GRADLE_VERSION
 export GRADLE_PATH=$GRADLE_HOME/bin/gradle

@@ -35,9 +35,7 @@ else
   echo "ndk.dir=/opt/ndk" >> $GRADLE_LOCAL_PROPERTIES

 echo "SONATYPE_NEXUS_USERNAME=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES
-echo "mavenCentralRepositoryUsername=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES
 echo "SONATYPE_NEXUS_PASSWORD=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES
-echo "mavenCentralRepositoryPassword=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES

 echo "signing.keyId=${ANDROID_SIGN_KEY}" >> $GRADLE_PROPERTIES
 echo "signing.password=${ANDROID_SIGN_PASS}" >> $GRADLE_PROPERTIES
@@ -24,9 +24,7 @@ retry sudo apt-get -y install \
 echo "== DOCKER VERSION =="
 docker version

-if ! command -v aws >/dev/null; then
-  retry sudo pip3 -q install awscli==1.19.64
-fi
+retry sudo pip -q install awscli==1.16.35

 if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
   DRIVER_FN="NVIDIA-Linux-x86_64-460.39.run"
@@ -50,50 +48,43 @@ else
 fi

 add_to_env_file() {
-  local name=$1
-  local value=$2
-  case "$value" in
-    *\ *)
-      # BASH_ENV should be set by CircleCI
-      echo "${name}='${value}'" >> "${BASH_ENV:-/tmp/env}"
-      ;;
-    *)
-      echo "${name}=${value}" >> "${BASH_ENV:-/tmp/env}"
-      ;;
-  esac
+  local content
+  content=$1
+  # BASH_ENV should be set by CircleCI
+  echo "${content}" >> "${BASH_ENV:-/tmp/env}"
 }

-add_to_env_file IN_CI 1
-add_to_env_file COMMIT_SOURCE "${CIRCLE_BRANCH:-}"
-add_to_env_file BUILD_ENVIRONMENT "${BUILD_ENVIRONMENT}"
-add_to_env_file CIRCLE_PULL_REQUEST "${CIRCLE_PULL_REQUEST}"
+add_to_env_file "IN_CI=1"
+add_to_env_file "COMMIT_SOURCE=${CIRCLE_BRANCH:-}"
+add_to_env_file "BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}"
+add_to_env_file "CIRCLE_PULL_REQUEST=${CIRCLE_PULL_REQUEST}"


 if [[ "${BUILD_ENVIRONMENT}" == *-build ]]; then
-  add_to_env_file SCCACHE_BUCKET ossci-compiler-cache-circleci-v2
+  add_to_env_file "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2"

   SCCACHE_MAX_JOBS=$(( $(nproc) - 1 ))
   MEMORY_LIMIT_MAX_JOBS=8 # the "large" resource class on CircleCI has 32 CPU cores, if we use all of them we'll OOM
   MAX_JOBS=$(( ${SCCACHE_MAX_JOBS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${SCCACHE_MAX_JOBS} ))
-  add_to_env_file MAX_JOBS "${MAX_JOBS}"
+  add_to_env_file "MAX_JOBS=${MAX_JOBS}"

   if [ -n "${USE_CUDA_DOCKER_RUNTIME:-}" ]; then
-    add_to_env_file TORCH_CUDA_ARCH_LIST 5.2
+    add_to_env_file "TORCH_CUDA_ARCH_LIST=5.2"
   fi

   if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
     # This IAM user allows write access to S3 bucket for sccache & bazels3cache
     set +x
-    add_to_env_file XLA_CLANG_CACHE_S3_BUCKET_NAME "${XLA_CLANG_CACHE_S3_BUCKET_NAME:-}"
-    add_to_env_file AWS_ACCESS_KEY_ID "${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}"
-    add_to_env_file AWS_SECRET_ACCESS_KEY "${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}"
+    add_to_env_file "XLA_CLANG_CACHE_S3_BUCKET_NAME=${XLA_CLANG_CACHE_S3_BUCKET_NAME:-}"
+    add_to_env_file "AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}"
+    add_to_env_file "AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V2:-}"
     set -x
   else
     # This IAM user allows write access to S3 bucket for sccache
     set +x
-    add_to_env_file XLA_CLANG_CACHE_S3_BUCKET_NAME "${XLA_CLANG_CACHE_S3_BUCKET_NAME:-}"
-    add_to_env_file AWS_ACCESS_KEY_ID "${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}"
-    add_to_env_file AWS_SECRET_ACCESS_KEY "${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}"
+    add_to_env_file "XLA_CLANG_CACHE_S3_BUCKET_NAME=${XLA_CLANG_CACHE_S3_BUCKET_NAME:-}"
+    add_to_env_file "AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}"
+    add_to_env_file "AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4:-}"
     set -x
   fi
 fi
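The two-argument `add_to_env_file` removed above exists to quote values that contain spaces before appending them to `$BASH_ENV`; the one-argument form on the release branch writes the caller-supplied string verbatim. A rough Python rendering of the removed quoting rule (a sketch for illustration only):

    # Sketch of the quoting rule in the removed two-argument helper:
    # values containing a space are single-quoted, others written as-is.
    def format_env_line(name: str, value: str) -> str:
        if " " in value:
            return f"{name}='{value}'"
        return f"{name}={value}"

    assert format_env_line("MAX_JOBS", "8") == "MAX_JOBS=8"
    assert format_env_line("COMMIT_SOURCE", "my branch") == "COMMIT_SOURCE='my branch'"

The same hunk caps compile parallelism with a shell ternary, which is simply min(nproc - 1, 8): on a 32-core CircleCI machine, 31 > 8, so MAX_JOBS ends up as 8.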
@@ -102,7 +93,5 @@ fi
 set +x
 export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V4:-}
 export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V4:-}
-export AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
-export AWS_REGION=us-east-1
-aws ecr get-login-password --region $AWS_REGION|docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+eval "$(aws ecr get-login --region us-east-1 --no-include-email)"
 set -x
@@ -1,140 +0,0 @@
-# Documentation: https://docs.microsoft.com/en-us/rest/api/azure/devops/build/?view=azure-devops-rest-6.0
-
-import re
-import json
-import os
-import sys
-import requests
-import time
-
-AZURE_PIPELINE_BASE_URL = "https://aiinfra.visualstudio.com/PyTorch/"
-AZURE_DEVOPS_PAT_BASE64 = os.environ.get("AZURE_DEVOPS_PAT_BASE64_SECRET", "")
-PIPELINE_ID = "911"
-PROJECT_ID = "0628bce4-2d33-499e-bac5-530e12db160f"
-TARGET_BRANCH = os.environ.get("CIRCLE_BRANCH", "master")
-TARGET_COMMIT = os.environ.get("CIRCLE_SHA1", "")
-
-build_base_url = AZURE_PIPELINE_BASE_URL + "_apis/build/builds?api-version=6.0"
-
-s = requests.Session()
-s.headers.update({"Authorization": "Basic " + AZURE_DEVOPS_PAT_BASE64})
-
-def submit_build(pipeline_id, project_id, source_branch, source_version):
-    print("Submitting build for branch: " + source_branch)
-    print("Commit SHA1: ", source_version)
-
-    run_build_raw = s.post(build_base_url, json={
-        "definition": {"id": pipeline_id},
-        "project": {"id": project_id},
-        "sourceBranch": source_branch,
-        "sourceVersion": source_version
-    })
-
-    try:
-        run_build_json = run_build_raw.json()
-    except json.decoder.JSONDecodeError as e:
-        print(e)
-        print("Failed to parse the response. Check if the Azure DevOps PAT is incorrect or expired.")
-        sys.exit(-1)
-
-    build_id = run_build_json['id']
-
-    print("Submitted build: " + str(build_id))
-    print("Build URL: " + run_build_json['url'])
-    return build_id
-
-def get_build(_id):
-    get_build_url = AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}?api-version=6.0"
-    get_build_raw = s.get(get_build_url)
-    return get_build_raw.json()
-
-def get_build_logs(_id):
-    get_build_logs_url = AZURE_PIPELINE_BASE_URL + f"/_apis/build/builds/{_id}/logs?api-version=6.0"
-    get_build_logs_raw = s.get(get_build_logs_url)
-    return get_build_logs_raw.json()
-
-def get_log_content(url):
-    resp = s.get(url)
-    return resp.text
-
-def wait_for_build(_id):
-    build_detail = get_build(_id)
-    build_status = build_detail['status']
-
-    while build_status == 'notStarted':
-        print('Waiting for run to start: ' + str(_id))
-        sys.stdout.flush()
-        try:
-            build_detail = get_build(_id)
-            build_status = build_detail['status']
-        except Exception as e:
-            print("Error getting build")
-            print(e)
-
-        time.sleep(30)
-
-    print("Build started: ", str(_id))
-
-    handled_logs = set()
-    while build_status == 'inProgress':
-        try:
-            print("Waiting for log: " + str(_id))
-            logs = get_build_logs(_id)
-        except Exception as e:
-            print("Error fetching logs")
-            print(e)
-            time.sleep(30)
-            continue
-
-        for log in logs['value']:
-            log_id = log['id']
-            if log_id in handled_logs:
-                continue
-            handled_logs.add(log_id)
-            print('Fetching log: \n' + log['url'])
-            try:
-                log_content = get_log_content(log['url'])
-                print(log_content)
-            except Exception as e:
-                print("Error getting log content")
-                print(e)
-            sys.stdout.flush()
-        build_detail = get_build(_id)
-        build_status = build_detail['status']
-        time.sleep(30)
-
-    build_result = build_detail['result']
-
-    print("Build status: " + build_status)
-    print("Build result: " + build_result)
-
-    return build_status, build_result
-
-if __name__ == '__main__':
-    # Convert the branch name for Azure DevOps
-    match = re.search(r'pull/(\d+)', TARGET_BRANCH)
-    if match is not None:
-        pr_num = match.group(1)
-        SOURCE_BRANCH = f'refs/pull/{pr_num}/head'
-    else:
-        SOURCE_BRANCH = f'refs/heads/{TARGET_BRANCH}'
-
-    MAX_RETRY = 2
-    retry = MAX_RETRY
-
-    while retry > 0:
-        build_id = submit_build(PIPELINE_ID, PROJECT_ID, SOURCE_BRANCH, TARGET_COMMIT)
-        build_status, build_result = wait_for_build(build_id)
-
-        if build_result != 'succeeded':
-            retry = retry - 1
-            if retry > 0:
-                print("Retrying... remaining attempt: " + str(retry))
-                # Wait a bit before retrying
-                time.sleep((MAX_RETRY - retry) * 120)
-                continue
-            else:
-                print("No more chance to retry. Giving up.")
-                sys.exit(-1)
-        else:
-            break
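Two details of the deleted trigger script are worth keeping in mind: CircleCI reports PR builds with branch names like `pull/1234`, which must be rewritten into Azure DevOps ref syntax, and failed builds are retried with a linearly growing pause. A condensed sketch of both behaviors, derived from the script above:

    import re

    def to_azure_ref(circle_branch: str) -> str:
        # CircleCI names PR builds "pull/<num>"; Azure DevOps expects a ref.
        match = re.search(r'pull/(\d+)', circle_branch)
        if match:
            return f'refs/pull/{match.group(1)}/head'
        return f'refs/heads/{circle_branch}'

    assert to_azure_ref("pull/1234") == "refs/pull/1234/head"
    assert to_azure_ref("master") == "refs/heads/master"

    # Retry pacing: with MAX_RETRY = 2 there is a single retry, taken after
    # (MAX_RETRY - retry) * 120 = 120 seconds.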
@@ -17,7 +17,7 @@ def get_size(file_dir):
         # we should only expect one file, if not, something is wrong
         file_name = glob.glob(os.path.join(file_dir, "*"))[0]
         return os.stat(file_name).st_size
-    except Exception:
+    except:
         logging.exception(f"error getting file from: {file_dir}")
         return 0

@@ -145,5 +145,5 @@ if __name__ == "__main__":
     if size != 0:
         try:
             send_message([build_message(size)])
-        except Exception:
+        except:
             logging.exception("can't send message")
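The release branch reverts `except Exception:` back to a bare `except:` in both places. The distinction is not cosmetic: a bare `except` also catches `SystemExit` and `KeyboardInterrupt`, which usually should propagate. A minimal illustration:

    # Sketch: SystemExit is not a subclass of Exception, so it escapes an
    # `except Exception` handler but would be swallowed by a bare `except`.
    import sys

    try:
        sys.exit(1)
    except Exception:
        print("not reached")  # the SystemExit propagates past this handler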
@@ -1,10 +1,7 @@
-# https://developercommunity.visualstudio.com/t/install-specific-version-of-vs-component/1142479
-# https://docs.microsoft.com/en-us/visualstudio/releases/2019/history#release-dates-and-build-numbers
-
-# 16.8.5 BuildTools
-$VS_DOWNLOAD_LINK = "https://download.visualstudio.microsoft.com/download/pr/20130c62-1bc8-43d6-b4f0-c20bb7c79113/145a319d79a83376915d8f855605e152ef5f6fa2b2f1d2dca411fb03722eea72/vs_BuildTools.exe"
+$VS_DOWNLOAD_LINK = "https://aka.ms/vs/15/release/vs_buildtools.exe"
 $COLLECT_DOWNLOAD_LINK = "https://aka.ms/vscollect.exe"
 $VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools",
+                    "--add Microsoft.VisualStudio.Component.VC.Tools.14.13",
                     "--add Microsoft.Component.MSBuild",
                     "--add Microsoft.VisualStudio.Component.Roslyn.Compiler",
                     "--add Microsoft.VisualStudio.Component.TextTemplating",
@@ -16,25 +13,10 @@ $VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStud

 curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe
 if ($LASTEXITCODE -ne 0) {
-    echo "Download of the VS 2019 Version 16.8.5 installer failed"
+    echo "Download of the VS 2017 installer failed"
     exit 1
 }

-if (Test-Path "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe") {
-    $existingPath = & "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -products "Microsoft.VisualStudio.Product.BuildTools" -version "[16, 17)" -property installationPath
-    if ($existingPath -ne $null) {
-        echo "Found existing BuildTools installation in $existingPath"
-        $VS_UNINSTALL_ARGS = @("uninstall", "--installPath", "`"$existingPath`"", "--quiet","--wait")
-        $process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_UNINSTALL_ARGS -NoNewWindow -Wait -PassThru
-        $exitCode = $process.ExitCode
-        if (($exitCode -ne 0) -and ($exitCode -ne 3010)) {
-            echo "Original BuildTools uninstall failed with code $exitCode"
-            exit 1
-        }
-        echo "Original BuildTools uninstalled"
-    }
-}
-
 $process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru
 Remove-Item -Path vs_installer.exe -Force
 $exitCode = $process.ExitCode
@@ -8,18 +8,9 @@ if [[ "$cuda_major_version" == "10" ]]; then
     msbuild_project_dir="CUDAVisualStudioIntegration/extras/visual_studio_integration/MSBuildExtensions"
     cuda_install_packages="nvcc_10.1 cuobjdump_10.1 nvprune_10.1 cupti_10.1 cublas_10.1 cublas_dev_10.1 cudart_10.1 cufft_10.1 cufft_dev_10.1 curand_10.1 curand_dev_10.1 cusolver_10.1 cusolver_dev_10.1 cusparse_10.1 cusparse_dev_10.1 nvgraph_10.1 nvgraph_dev_10.1 npp_10.1 npp_dev_10.1 nvrtc_10.1 nvrtc_dev_10.1 nvml_dev_10.1"
 elif [[ "$cuda_major_version" == "11" ]]; then
-    if [[ "${CUDA_VERSION}" == "11.1" ]]; then
-        cuda_installer_name="cuda_11.1.0_456.43_win10"
-        msbuild_project_dir="visual_studio_integration/CUDAVisualStudioIntegration/extras/visual_studio_integration/MSBuildExtensions"
-        cuda_install_packages="nvcc_11.1 cuobjdump_11.1 nvprune_11.1 nvprof_11.1 cupti_11.1 cublas_11.1 cublas_dev_11.1 cudart_11.1 cufft_11.1 cufft_dev_11.1 curand_11.1 curand_dev_11.1 cusolver_11.1 cusolver_dev_11.1 cusparse_11.1 cusparse_dev_11.1 npp_11.1 npp_dev_11.1 nvrtc_11.1 nvrtc_dev_11.1 nvml_dev_11.1"
-    elif [[ "${CUDA_VERSION}" == "11.3" ]]; then
-        cuda_installer_name="cuda_11.3.0_465.89_win10"
-        msbuild_project_dir="visual_studio_integration/CUDAVisualStudioIntegration/extras/visual_studio_integration/MSBuildExtensions"
-        cuda_install_packages="thrust_11.3 nvcc_11.3 cuobjdump_11.3 nvprune_11.3 nvprof_11.3 cupti_11.3 cublas_11.3 cublas_dev_11.3 cudart_11.3 cufft_11.3 cufft_dev_11.3 curand_11.3 curand_dev_11.3 cusolver_11.3 cusolver_dev_11.3 cusparse_11.3 cusparse_dev_11.3 npp_11.3 npp_dev_11.3 nvrtc_11.3 nvrtc_dev_11.3 nvml_dev_11.3"
-    else
-        echo "This should not happen! ABORT."
-        exit 1
-    fi
+    cuda_installer_name="cuda_11.1.0_456.43_win10"
+    msbuild_project_dir="visual_studio_integration/CUDAVisualStudioIntegration/extras/visual_studio_integration/MSBuildExtensions"
+    cuda_install_packages="nvcc_11.1 cuobjdump_11.1 nvprune_11.1 nvprof_11.1 cupti_11.1 cublas_11.1 cublas_dev_11.1 cudart_11.1 cufft_11.1 cufft_dev_11.1 curand_11.1 curand_dev_11.1 cusolver_11.1 cusolver_dev_11.1 cusparse_11.1 cusparse_dev_11.1 npp_11.1 npp_dev_11.1 nvrtc_11.1 nvrtc_dev_11.1 nvml_dev_11.1"
 else
     echo "CUDA_VERSION $CUDA_VERSION is not supported yet"
     exit 1
@@ -6,14 +6,7 @@ cuda_major_version=${CUDA_VERSION%.*}
 if [[ "$cuda_major_version" == "10" ]]; then
     cudnn_installer_name="cudnn-${CUDA_VERSION}-windows10-x64-v7.6.4.38"
 elif [[ "$cuda_major_version" == "11" ]]; then
-    if [[ "${CUDA_VERSION}" == "11.1" ]]; then
-        cudnn_installer_name="cudnn-${CUDA_VERSION}-windows-x64-v8.0.5.39"
-    elif [[ "${CUDA_VERSION}" == "11.3" ]]; then
-        cudnn_installer_name="cudnn-${CUDA_VERSION}-windows-x64-v8.2.0.53"
-    else
-        echo "This should not happen! ABORT."
-        exit 1
-    fi
+    cudnn_installer_name="cudnn-${CUDA_VERSION}-windows-x64-v8.0.5.39"
 else
     echo "CUDNN for CUDA_VERSION $CUDA_VERSION is not supported yet"
     exit 1
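The branches removed in these two hunks map a CUDA version onto a matched installer and cuDNN package; the release branch keeps only the 11.1 pair. The same selection can be seen as a table lookup, sketched here with the names taken from the diff (the function itself is hypothetical):

    # Sketch: table-driven view of the removed version branches.
    CUDA_INSTALLERS = {
        "11.1": ("cuda_11.1.0_456.43_win10", "cudnn-11.1-windows-x64-v8.0.5.39"),
        "11.3": ("cuda_11.3.0_465.89_win10", "cudnn-11.3-windows-x64-v8.2.0.53"),
    }

    def pick_installers(cuda_version: str):
        try:
            return CUDA_INSTALLERS[cuda_version]
        except KeyError:
            raise SystemExit(f"CUDA_VERSION {cuda_version} is not supported yet")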
@@ -22,24 +22,6 @@ pytorch_params: &pytorch_params
     BUILD_ONLY: << parameters.build_only >>
   resource_class: << parameters.resource_class >>

-pytorch_android_params: &pytorch_android_params
-  parameters:
-    build_environment:
-      type: string
-      default: ""
-    op_list:
-      type: string
-      default: ""
-    lite_interpreter:
-      type: string
-      default: "1"
-  environment:
-    BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single
-    DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c"
-    PYTHON_VERSION: "3.6"
-    SELECTED_OP_LIST: << parameters.op_list >>
-    BUILD_LITE_INTERPRETER: << parameters.lite_interpreter >>
-
 pytorch_ios_params: &pytorch_ios_params
   parameters:
     build_environment:
@@ -57,16 +39,12 @@ pytorch_ios_params: &pytorch_ios_params
     use_metal:
       type: string
       default: "0"
-    lite_interpreter:
-      type: string
-      default: "1"
   environment:
     BUILD_ENVIRONMENT: << parameters.build_environment >>
     IOS_ARCH: << parameters.ios_arch >>
     IOS_PLATFORM: << parameters.ios_platform >>
     SELECTED_OP_LIST: << parameters.op_list >>
     USE_PYTORCH_METAL: << parameters.use_metal >>
-    BUILD_LITE_INTERPRETER: << parameters.lite_interpreter >>

 pytorch_windows_params: &pytorch_windows_params
   parameters:
@@ -106,6 +84,6 @@ pytorch_windows_params: &pytorch_windows_params
     VC_YEAR: <<parameters.vc_year>>
     VC_PRODUCT: <<parameters.vc_product>>
     USE_CUDA: <<parameters.use_cuda>>
-    TORCH_CUDA_ARCH_LIST: "5.2 7.5"
+    TORCH_CUDA_ARCH_LIST: "7.5"
     JOB_BASE_NAME: <<parameters.test_name>>
     JOB_EXECUTOR: <<parameters.executor>>
@@ -111,11 +111,11 @@ commands:
       git config --global user.email "circleci.ossci@gmail.com"
       git config --global user.name "CircleCI"
      git config remote.origin.url https://github.com/pytorch/pytorch.git
-      git config --add remote.origin.fetch +refs/heads/master:refs/remotes/origin/master
-      git fetch --tags --progress https://github.com/pytorch/pytorch.git +refs/heads/master:refs/remotes/origin/master --depth=100 --quiet
+      git config --add remote.origin.fetch +refs/heads/release/1.8:refs/remotes/origin/release/1.8
+      git fetch --tags --progress https://github.com/pytorch/pytorch.git +refs/heads/release/1.8:refs/remotes/origin/release/1.8 --depth=100 --quiet
       # PRs generated from ghstack has format CIRCLE_PR_BASE_BRANCH=gh/xxx/1234/base
       if [[ "${CIRCLE_PR_BASE_BRANCH}" == "gh/"* ]]; then
-        CIRCLE_PR_BASE_BRANCH=master
+        CIRCLE_PR_BASE_BRANCH=release/1.8
       fi
       export GIT_MERGE_TARGET=`git log -n 1 --pretty=format:"%H" origin/$CIRCLE_PR_BASE_BRANCH`
       echo "GIT_MERGE_TARGET: " ${GIT_MERGE_TARGET}
@@ -14,15 +14,19 @@ parameters:
   run_build:
     type: boolean
     default: true
-  run_master_build:
-    type: boolean
-    default: false
+docker_config_defaults: &docker_config_defaults
+  user: jenkins
+  aws_auth:
+    # This IAM user only allows read-write access to ECR
+    aws_access_key_id: ${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V4}
+    aws_secret_access_key: ${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V4}

 executors:
   windows-with-nvidia-gpu:
     machine:
       resource_class: windows.gpu.nvidia.medium
-      image: windows-server-2019-nvidia:previous
+      image: windows-server-2019-nvidia:stable
       shell: bash.exe

   windows-xlarge-cpu-with-nvidia-cuda:
@@ -45,7 +45,7 @@
   binary_linux_test:
     <<: *binary_linux_test_upload_params
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
       - checkout
@@ -108,7 +108,7 @@
   smoke_linux_test:
     <<: *binary_linux_test_upload_params
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -293,6 +293,11 @@
     steps:
       # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
       - checkout
+      - run:
+          name: _HACK_ Install CUDA compatible cmath
+          no_output_timeout: 1m
+          command: |
+            powershell .circleci/scripts/vs_install_cmath.ps1
       - run:
           <<: *binary_checkout
       - run:
@@ -308,8 +313,6 @@
       - persist_to_workspace:
           root: "C:/w"
           paths: final_pkgs
-      - store_artifacts:
-          path: C:/w/final_pkgs

   binary_windows_test:
     <<: *binary_windows_params
@@ -392,3 +395,4 @@
             command: |
               ANACONDA_API_TOKEN="${CONDA_PYTORCHBOT_TOKEN}" \
               scripts/release/anaconda-prune/run.sh
+
@@ -8,7 +8,7 @@
 # then install the one with the most recent version.
 update_s3_htmls: &update_s3_htmls
   machine:
-    image: ubuntu-2004:202104-01
+    image: ubuntu-1604:202007-01
   resource_class: medium
   steps:
     - checkout
@@ -4,7 +4,7 @@
       type: string
       default: ""
   machine:
-    image: ubuntu-2004:202104-01
+    image: ubuntu-1604:202007-01
   resource_class: large
   environment:
     IMAGE_NAME: << parameters.image_name >>
@@ -20,10 +20,7 @@
           set +x
           export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
           export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
-          export AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
-          export AWS_REGION=us-east-1
-          aws ecr get-login-password --region $AWS_REGION|docker login --username AWS \
-            --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+          eval $(aws ecr get-login --no-include-email --region us-east-1)
           set -x
           # Check if image already exists, if it does then skip building it
           if docker manifest inspect "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/${IMAGE_NAME}:${DOCKER_TAG}"; then
@@ -56,7 +53,7 @@
           cd .circleci/docker && ./build_docker.sh
   docker_for_ecr_gc_build_job:
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - run:
@@ -68,12 +65,9 @@
          set +x
           export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_DOCKER_BUILDER_V1}
           export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_DOCKER_BUILDER_V1}
-          export AWS_ACCOUNT_ID=$(aws sts get-caller-identity|grep Account|cut -f4 -d\")
-          export AWS_REGION=us-east-1
-          aws ecr get-login-password --region $AWS_REGION|docker login --username AWS \
-            --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
+          eval $(aws ecr get-login --no-include-email --region us-east-1)
           set -x
-          docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/gc/ecr
+          docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/gc/ecr
   ecr_gc_job:
     parameters:
       project:
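Both ECR login variants in these hunks obtain a temporary registry token and hand it to `docker login`; the newer form pipes the password over stdin instead of eval'ing shell emitted by `aws ecr get-login`. A rough Python sketch of the password-stdin flow (a sketch that assumes the AWS CLI and Docker are on PATH):

    # Sketch: ECR login via get-login-password, piping the token to docker.
    import subprocess

    region = "us-east-1"
    registry = "308535385114.dkr.ecr.us-east-1.amazonaws.com"

    token = subprocess.check_output(
        ["aws", "ecr", "get-login-password", "--region", region], text=True)
    subprocess.run(
        ["docker", "login", "--username", "AWS", "--password-stdin", registry],
        input=token, text=True, check=True)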
@@ -1,7 +1,7 @@
   pytorch_doc_push:
     resource_class: medium
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     parameters:
       branch:
         type: string
@@ -30,7 +30,7 @@
       DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4"
     resource_class: large
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -75,7 +75,7 @@
       DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4"
     resource_class: large
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -111,43 +111,6 @@
           paths:
             - .

-  pytorch_macos_10_15_py3_build:
-    environment:
-      BUILD_ENVIRONMENT: pytorch-macos-10.15-py3-arm64-build
-    macos:
-      xcode: "12.3.0"
-    steps:
-      - checkout
-      - run_brew_for_macos_build
-      - run:
-          name: Build
-          no_output_timeout: "1h"
-          command: |
-            set -e
-            export IN_CI=1
-            export CROSS_COMPILE_ARM64=1
-
-            # Install sccache
-            sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
-            sudo chmod +x /usr/local/bin/sccache
-            export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2
-
-            # This IAM user allows write access to S3 bucket for sccache
-            set +x
-            export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}
-            export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}
-            set -x
-
-            chmod a+x .jenkins/pytorch/macos-build.sh
-            unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts
-
-      - persist_to_workspace:
-          root: /Users/distiller/workspace/
-          paths:
-            - miniconda3
-      - store_artifacts:
-          path: /Users/distiller/project/dist
-
   pytorch_macos_10_13_py3_build:
     environment:
       BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-build
@@ -164,7 +127,7 @@
             export IN_CI=1

             # Install sccache
-            sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
+            sudo curl --retry 3 https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache
             sudo chmod +x /usr/local/bin/sccache
             export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2

@@ -201,42 +164,6 @@

             chmod a+x .jenkins/pytorch/macos-test.sh
             unbuffer .jenkins/pytorch/macos-test.sh 2>&1 | ts
-      - run:
-          name: Report results
-          no_output_timeout: "5m"
-          command: |
-            set -ex
-            source /Users/distiller/workspace/miniconda3/bin/activate
-            pip install boto3
-            export PYTHONPATH="$PWD"
-
-            # Using the same IAM user to write stats to our OSS bucket
-            export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V4}
-            export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V4}
-            python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test
-          when: always
-      - store_test_results:
-          path: test/test-reports
-
-  pytorch_macos_10_13_py3_lite_interpreter_build_test:
-    environment:
-      BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-test
-    macos:
-      xcode: "12.0"
-    steps:
-      - checkout
-      - attach_workspace:
-          at: ~/workspace
-      - run_brew_for_macos_build
-      - run:
-          name: Test
-          no_output_timeout: "1h"
-          command: |
-            set -e
-            export IN_CI=1
-            export BUILD_LITE_INTERPRETER=1
-            chmod a+x ${HOME}/project/.jenkins/pytorch/macos-lite-interpreter-build-test.sh
-            unbuffer ${HOME}/project/.jenkins/pytorch/macos-lite-interpreter-build-test.sh 2>&1 | ts
       - store_test_results:
           path: test/test-reports

@@ -247,7 +174,7 @@
       PYTHON_VERSION: "3.6"
     resource_class: large
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -336,7 +263,7 @@
       PYTHON_VERSION: "3.6"
     resource_class: large
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -372,7 +299,7 @@
       PYTHON_VERSION: "3.6"
     resource_class: large
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -408,10 +335,13 @@
           destination: artifacts.tgz

   pytorch_android_gradle_custom_build_single:
-    <<: *pytorch_android_params
+    environment:
+      BUILD_ENVIRONMENT: pytorch-linux-xenial-py3-clang5-android-ndk-r19c-gradle-custom-build-single
+      DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c"
+      PYTHON_VERSION: "3.6"
     resource_class: large
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -431,11 +361,11 @@
             echo "DOCKER_IMAGE: ${DOCKER_IMAGE}:${DOCKER_TAG}"
             time docker pull ${DOCKER_IMAGE}:${DOCKER_TAG} >/dev/null

-            git submodule sync && git submodule update -q --init --recursive --depth 1
+            git submodule sync && git submodule update -q --init --recursive
             VOLUME_MOUNTS="-v /home/circleci/project/:/var/lib/jenkins/workspace"
             export id=$(docker run --env-file "${BASH_ENV}" ${VOLUME_MOUNTS} --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}:${DOCKER_TAG})

-            export COMMAND='((echo "export GRADLE_OFFLINE=1" && echo "export BUILD_LITE_INTERPRETER=${BUILD_LITE_INTERPRETER}" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/build_android_gradle.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
+            export COMMAND='((echo "export GRADLE_OFFLINE=1" && echo "sudo chown -R jenkins workspace && cd workspace && ./.circleci/scripts/build_android_gradle.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
             echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts

             # Skip docker push as this job is purely for size analysis purpose.
@@ -500,7 +430,7 @@
             # sync submodules
             cd ${PROJ_ROOT}
             git submodule sync
-            git submodule update --init --recursive --depth 1
+            git submodule update --init --recursive

             # export
             export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
@@ -510,7 +440,6 @@
             echo "IOS_ARCH: ${IOS_ARCH}"
             echo "IOS_PLATFORM: ${IOS_PLATFORM}"
             echo "USE_PYTORCH_METAL": "${USE_METAL}"
-            echo "BUILD_LITE_INTERPRETER": "${BUILD_LITE_INTERPRETER}"

             #check the custom build flag
             echo "SELECTED_OP_LIST: ${SELECTED_OP_LIST}"
@@ -528,10 +457,6 @@
           no_output_timeout: "30m"
           command: |
             set -e
-            if [ ${BUILD_LITE_INTERPRETER} == 0 ]; then
-                echo "Run Build Test is not for full jit, skipping."
-                exit 0
-            fi
             PROJ_ROOT=/Users/distiller/project
             PROFILE=PyTorch_CI_2021
             # run the ruby build script
@@ -557,9 +482,6 @@
             if [ ${IOS_PLATFORM} != "SIMULATOR" ]; then
                 echo "not SIMULATOR build, skip it."
                 exit 0
-            elif [ ${BUILD_LITE_INTERPRETER} == 0 ]; then
-                echo "Run Simulator Tests is not for full jit, skipping."
-                exit 0
             fi
             WORKSPACE=/Users/distiller/workspace
             PROJ_ROOT=/Users/distiller/project
@@ -575,7 +497,7 @@
   pytorch_linux_bazel_build:
     <<: *pytorch_params
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -593,7 +515,7 @@

             echo "Do NOT merge master branch into $CIRCLE_BRANCH in environment $BUILD_ENVIRONMENT"

-            git submodule sync && git submodule update -q --init --recursive --depth 1
+            git submodule sync && git submodule update -q --init --recursive

             docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace

@@ -613,7 +535,7 @@
   pytorch_linux_bazel_test:
     <<: *pytorch_params
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -654,26 +576,13 @@
       - store_test_results:
           path: bazel-testlogs

-  pytorch_windows_test_multigpu:
-    machine:
-      image: ubuntu-2004:202104-01
-    steps:
-      - checkout
-      - run:
-          name: Test
-          no_output_timeout: "90m"
-          command: |
-            set -e
-            python3 -m pip install requests
-            python3 ./.circleci/scripts/trigger_azure_pipeline.py
-
   pytorch_doc_test:
     environment:
       BUILD_ENVIRONMENT: pytorch-doc-test
       DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4"
     resource_class: medium
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       - checkout
       - calculate_docker_image_tag
@@ -2,7 +2,7 @@ jobs:
   pytorch_linux_build:
     <<: *pytorch_params
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
       - checkout
@@ -15,6 +15,9 @@ jobs:
           no_output_timeout: "1h"
           command: |
             set -e
+            if [[ "${DOCKER_IMAGE}" == *rocm3.9* ]]; then
+              export DOCKER_TAG="f3d89a32912f62815e4feaeed47e564e887dffd6"
+            fi
             if [[ ${BUILD_ENVIRONMENT} == *"pure_torch"* ]]; then
               echo 'BUILD_CAFFE2=OFF' >> "${BASH_ENV}"
             fi
@@ -30,11 +33,11 @@ jobs:
             time docker pull ${DOCKER_IMAGE}:${DOCKER_TAG} >/dev/null
             export id=$(docker run --env-file "${BASH_ENV}" --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}:${DOCKER_TAG})

-            git submodule sync && git submodule update -q --init --recursive --depth 1
+            git submodule sync && git submodule update -q --init --recursive

             docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace

-            export COMMAND='((echo "sudo chown -R jenkins workspace && export CIRCLE_JOB="$CIRCLE_JOB" && cd workspace && .jenkins/pytorch/build.sh && find ${BUILD_ROOT} -type f -name "*.a" -or -name "*.o" -delete") | docker exec -u jenkins -i "$id" bash) 2>&1'
+            export COMMAND='((echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh && find ${BUILD_ROOT} -type f -name "*.a" -or -name "*.o" -delete") | docker exec -u jenkins -i "$id" bash) 2>&1'

             echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts

@@ -80,7 +83,7 @@ jobs:
   pytorch_linux_test:
     <<: *pytorch_params
     machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-1604:202007-01
     steps:
       # See Note [Workspace for CircleCI scripts] in job-specs-setup.yml
       - checkout
@@ -165,7 +168,6 @@ jobs:
             # =================== The following code will be executed inside Docker container ===================
             set -ex
             export SCRIBE_GRAPHQL_ACCESS_TOKEN="${SCRIBE_GRAPHQL_ACCESS_TOKEN}"
-            export CIRCLE_JOB="$CIRCLE_JOB"
             ${PARALLEL_FLAGS}
             cd workspace
             EOL
@@ -180,27 +182,11 @@ jobs:
             fi
             echo "(cat docker_commands.sh | docker exec -u jenkins -i "$id" bash) 2>&1" > command.sh
             unbuffer bash command.sh | ts
-
-            if [[ ${BUILD_ENVIRONMENT} == *"coverage"* ]]; then
-              echo "Retrieving C++ coverage report"
-              docker cp $id:/var/lib/jenkins/workspace/build/coverage.info ./test
-            fi
-            if [[ ${BUILD_ENVIRONMENT} == *"coverage"* || ${BUILD_ENVIRONMENT} == *"onnx"* ]]; then
-              echo "Retrieving Python coverage report"
-              docker cp $id:/var/lib/jenkins/workspace/test/.coverage ./test
-              docker cp $id:/var/lib/jenkins/workspace/test/coverage.xml ./test
-              python3 -mpip install codecov
-              python3 -mcodecov
-            fi
       - run:
           name: Report results
           no_output_timeout: "5m"
           command: |
             set -e
-            # Retrieving test results should be done as very first step as command never fails
-            # But is always executed if previous step fails for some reason
-            echo "Retrieving test reports"
-            docker cp $id:/var/lib/jenkins/workspace/test/test-reports ./ || echo 'No test reports found!'
             docker stats --all --no-stream

             cat >docker_commands.sh \<<EOL
@@ -215,18 +201,27 @@ jobs:
             export CIRCLE_JOB="$CIRCLE_JOB"
             export CIRCLE_WORKFLOW_ID="$CIRCLE_WORKFLOW_ID"
             cd workspace
-            export PYTHONPATH="\${PWD}"
-            python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test
+            python test/print_test_stats.py --upload-to-s3 test
             EOL
-            echo "(cat docker_commands.sh | docker exec -u jenkins -e LANG=C.UTF-8 -i "$id" bash) 2>&1" > command.sh
+            echo "(cat docker_commands.sh | docker exec -u jenkins -i "$id" bash) 2>&1" > command.sh
             unbuffer bash command.sh | ts
+
+            echo "Retrieving test reports"
+            docker cp $id:/var/lib/jenkins/workspace/test/test-reports ./ || echo 'No test reports found!'
+            if [[ ${BUILD_ENVIRONMENT} == *"coverage"* ]]; then
+              echo "Retrieving C++ coverage report"
+              docker cp $id:/var/lib/jenkins/workspace/build/coverage.info ./test
+            fi
+            if [[ ${BUILD_ENVIRONMENT} == *"coverage"* || ${BUILD_ENVIRONMENT} == *"onnx"* ]]; then
+              echo "Retrieving Python coverage report"
+              docker cp $id:/var/lib/jenkins/workspace/test/.coverage ./test
+              docker cp $id:/var/lib/jenkins/workspace/test/coverage.xml ./test
+              python3 -mpip install codecov
+              python3 -mcodecov
+            fi
           when: always
       - store_test_results:
           path: test-reports
-      - store_artifacts:
-          path: test/.coverage
-      - store_artifacts:
-          path: test/coverage.xml

   pytorch_windows_build:
     <<: *pytorch_windows_params
|
<<: *pytorch_windows_params
|
||||||
@ -262,10 +257,10 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
name: Install VS2019 toolchain
|
name: _HACK_ Install CUDA compatible cmath
|
||||||
no_output_timeout: 10m
|
no_output_timeout: 1m
|
||||||
command: |
|
command: |
|
||||||
powershell .circleci/scripts/vs_install.ps1
|
powershell .circleci/scripts/vs_install_cmath.ps1
|
||||||
- run:
|
- run:
|
||||||
name: Install Cuda
|
name: Install Cuda
|
||||||
no_output_timeout: 30m
|
no_output_timeout: 30m
|
||||||
@ -330,11 +325,6 @@ jobs:
|
|||||||
- checkout
|
- checkout
|
||||||
- attach_workspace:
|
- attach_workspace:
|
||||||
at: c:/users/circleci/workspace
|
at: c:/users/circleci/workspace
|
||||||
- run:
|
|
||||||
name: Install VS2019 toolchain
|
|
||||||
no_output_timeout: 10m
|
|
||||||
command: |
|
|
||||||
powershell .circleci/scripts/vs_install.ps1
|
|
||||||
- run:
|
- run:
|
||||||
name: Install Cuda
|
name: Install Cuda
|
||||||
no_output_timeout: 30m
|
no_output_timeout: 30m
|
||||||
@ -361,18 +351,5 @@ jobs:
|
|||||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
|
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
|
||||||
set -x
|
set -x
|
||||||
.jenkins/pytorch/win-test.sh
|
.jenkins/pytorch/win-test.sh
|
||||||
- run:
|
|
||||||
name: Report results
|
|
||||||
no_output_timeout: "5m"
|
|
||||||
command: |
|
|
||||||
set -ex
|
|
||||||
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_WIN_BUILD_V1}
|
|
||||||
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_WIN_BUILD_V1}
|
|
||||||
export PYTHONPATH="$PWD"
|
|
||||||
pip install typing_extensions boto3
|
|
||||||
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test
|
|
||||||
when: always
|
|
||||||
- store_test_results:
|
- store_test_results:
|
||||||
path: test/test-reports
|
path: test/test-reports
|
||||||
- store_artifacts:
|
|
||||||
path: test/coverage.xml
|
|
||||||
|
|||||||
@@ -1,195 +0,0 @@
-  scheduled-ci:
-    triggers:
-      - schedule:
-          # runs every 4 hours on the 45th minute
-          cron: "45 0,4,8,12,16,20 * * *"
-          filters:
-            branches:
-              only:
-                - master
-    jobs:
-      - docker_build_job:
-          name: "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          image_name: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-      - pytorch_linux_build:
-          name: periodic_pytorch_xenial_cuda11_3_cudnn8_gcc7_build
-          requires:
-            - "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          build_environment: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-      - pytorch_linux_test:
-          name: periodic_pytorch_xenial_cuda11_3_cudnn8_gcc7_test
-          requires:
-            - periodic_pytorch_xenial_cuda11_3_cudnn8_gcc7_build
-          build_environment: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-test"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          use_cuda_docker_runtime: "1"
-          resource_class: gpu.medium
-      - pytorch_linux_build:
-          name: periodic_libtorch_xenial_cuda11_3_cudnn8_gcc7_build
-          requires:
-            - "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          build_environment: "pytorch-libtorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-      - pytorch_windows_build:
-          build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
-          cuda_version: "11.3"
-          name: periodic_pytorch_windows_cuda11.3_build
-          python_version: "3.6"
-          use_cuda: "1"
-          vc_product: BuildTools
-          vc_version: "14.28.29333"
-          vc_year: "2019"
-      - pytorch_windows_test:
-          build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
-          cuda_version: "11.3"
-          executor: windows-with-nvidia-gpu
-          name: periodic_pytorch_windows_cuda11.3_test1
-          python_version: "3.6"
-          requires:
-            - periodic_pytorch_windows_cuda11.3_build
-          test_name: pytorch-windows-test1
-          use_cuda: "1"
-          vc_product: BuildTools
-          vc_version: "14.28.29333"
-          vc_year: "2019"
-      - pytorch_windows_test:
-          build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
-          cuda_version: "11.3"
-          executor: windows-with-nvidia-gpu
-          name: periodic_pytorch_windows_cuda11.3_test2
-          python_version: "3.6"
-          requires:
-            - periodic_pytorch_windows_cuda11.3_build
-          test_name: pytorch-windows-test2
-          use_cuda: "1"
-          vc_product: BuildTools
-          vc_version: "14.28.29333"
-          vc_year: "2019"
-
-  # The following allows these jobs to run on ci-all and release branches
-  debuggable-scheduled-ci:
-    jobs:
-      - docker_build_job:
-          name: "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          image_name: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          filters:
-            branches:
-              only:
-                - /ci-all\/.*/
-                - /release\/.*/
-      - pytorch_linux_build:
-          name: pytorch_linux_xenial_cuda11_3_cudnn8_py3_gcc7_build
-          requires:
-            - "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          build_environment: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          filters:
-            branches:
-              only:
-                - /ci-all\/.*/
-                - /release\/.*/
-      - pytorch_linux_test:
-          name: pytorch_linux_xenial_cuda11_3_cudnn8_py3_gcc7_test
-          requires:
-            - pytorch_linux_xenial_cuda11_3_cudnn8_py3_gcc7_build
-          build_environment: "pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-test"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          use_cuda_docker_runtime: "1"
-          resource_class: gpu.medium
-          filters:
-            branches:
-              only:
-                - /ci-all\/.*/
-                - /release\/.*/
-      - pytorch_linux_build:
-          name: pytorch_libtorch_linux_xenial_cuda11_3_cudnn8_py3_gcc7_build
-          requires:
-            - "docker-pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          build_environment: "pytorch-libtorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda11.3-cudnn8-py3-gcc7"
-          filters:
-            branches:
-              only:
-                - /ci-all\/.*/
-                - /release\/.*/
-      - pytorch_windows_build:
-          build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
-          cuda_version: "11.3"
-          name: pytorch_windows_vs2019_py36_cuda11.3_build
-          python_version: "3.6"
-          use_cuda: "1"
-          vc_product: BuildTools
-          vc_version: "14.28.29333"
-          vc_year: "2019"
-          filters:
-            branches:
-              only:
-                - /ci-all\/.*/
-                - /release\/.*/
-      - pytorch_windows_test:
-          build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
-          cuda_version: "11.3"
-          executor: windows-with-nvidia-gpu
-          name: pytorch_windows_vs2019_py36_cuda11.3_test1
-          python_version: "3.6"
-          requires:
-            - pytorch_windows_vs2019_py36_cuda11.3_build
-          test_name: pytorch-windows-test1
-          use_cuda: "1"
-          vc_product: BuildTools
-          vc_version: "14.28.29333"
-          vc_year: "2019"
-          filters:
-            branches:
-              only:
-                - /ci-all\/.*/
-                - /release\/.*/
-      - pytorch_windows_test:
-          build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
-          cuda_version: "11.3"
-          executor: windows-with-nvidia-gpu
-          name: pytorch_windows_vs2019_py36_cuda11.3_test2
-          python_version: "3.6"
-          requires:
-            - pytorch_windows_vs2019_py36_cuda11.3_build
-          test_name: pytorch-windows-test2
-          use_cuda: "1"
-          vc_product: BuildTools
-          vc_version: "14.28.29333"
-          vc_year: "2019"
-          filters:
-            branches:
-              only:
-                - /ci-all\/.*/
-                - /release\/.*/
-
-  # the following clones pytorch_linux_xenial_cuda10_2_cudnn7_py3_gcc7's tests but enables
-  # slow tests and sets an environment variable so gradcheck runs with fast_mode=False
-  slow-gradcheck-scheduled-ci:
-    triggers:
-      - schedule:
-          # runs every 8 hours on the 45th minute
-          cron: "45 0,8,16 * * *"
-          filters:
-            branches:
-              only:
-                - master
-    jobs:
-      - docker_build_job:
-          name: "docker-pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
-          image_name: "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
-      - pytorch_linux_build:
-          name: periodic_pytorch_xenial_cuda10_2_cudnn7_gcc7_build
-          requires:
-            - "docker-pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
-          build_environment: "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
-      - pytorch_linux_test:
-          name: periodic_pytorch_xenial_cuda10_2_cudnn7_gcc7_old_gradcheck_tests
-          requires:
-            - periodic_pytorch_xenial_cuda10_2_cudnn7_gcc7_build
-          build_environment: "pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7-old-gradcheck-tests"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7"
-          use_cuda_docker_runtime: "1"
-          resource_class: gpu.medium
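A quick sanity check on the schedules above (an illustrative Python snippet, not part of the diff): the deleted scheduled-ci cron "45 0,4,8,12,16,20 * * *" fires at minute 45 of every fourth hour, i.e. six runs per day.

hours = [0, 4, 8, 12, 16, 20]
print([f"{h:02d}:45 UTC" for h in hours])
# ['00:45 UTC', '04:45 UTC', '08:45 UTC', '12:45 UTC', '16:45 UTC', '20:45 UTC']
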
@@ -1129,3 +1129,4 @@ JNIEXPORT void JNI_OnUnload(JavaVM* vm, void* reserved);
 #define JNI_ABORT 2 /* free buffer w/o copying back */

 #endif /* JNI_H_ */
+

@@ -6,11 +6,8 @@ bugprone-*,
 -bugprone-forward-declaration-namespace,
 -bugprone-macro-parentheses,
 -bugprone-lambda-function-name,
--bugprone-reserved-identifier,
 cppcoreguidelines-*,
--cppcoreguidelines-avoid-magic-numbers,
 -cppcoreguidelines-interfaces-global-init,
--cppcoreguidelines-macro-usage,
 -cppcoreguidelines-owning-memory,
 -cppcoreguidelines-pro-bounds-array-to-pointer-decay,
 -cppcoreguidelines-pro-bounds-constant-array-index,
@@ -33,7 +30,6 @@ modernize-*,
 -modernize-use-trailing-return-type,
 performance-*,
 -performance-noexcept-move-constructor,
--performance-unnecessary-value-param,
 '
 HeaderFilterRegex: 'torch/csrc/.*'
 AnalyzeTemporaryDtors: false

--- a/.coveragerc
+++ /dev/null
@@ -1,15 +0,0 @@
-[run]
-plugins =
-    coverage_plugins.jit_plugin
-omit =
-    */tmp*
-    */Temp/*
-    */usr/local/lib*
-    *test/*
-
-[report]
-omit =
-    */tmp*
-    */Temp/*
-    */usr/local/lib*
-    *test/*

--- a/.flake8
+++ b/.flake8
@@ -4,7 +4,7 @@ max-line-length = 120
 # C408 ignored because we like the dict keyword argument syntax
 # E501 is not flexible enough, we're using B950 instead
 ignore =
-    E203,E305,E402,E501,E721,E741,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
+    E203,E305,E402,E501,E721,E741,F403,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
     # shebang has extra meaning in fbcode lints, so I think it's not worth trying
     # to line this up with executable bit
     EXE001,
@@ -13,20 +13,21 @@ ignore =
     # these ignores are from flake8-comprehensions; please fix!
     C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415
 per-file-ignores = __init__.py: F401 torch/utils/cpp_extension.py: B950
-optional-ascii-coding = True
 exclude =
-    ./.git,
-    ./build_code_analyzer,
-    ./build_test_custom_build,
-    ./build,
-    ./caffe2,
-    ./docs/caffe2,
-    ./docs/cpp/src,
-    ./docs/src,
-    ./scripts,
-    ./test/generated_type_hints_smoketest.py,
-    ./third_party,
-    ./torch/include,
-    ./torch/lib,
-    ./venv,
-    *.pyi
+    docs/src,
+    docs/cpp/src,
+    venv,
+    third_party,
+    caffe2,
+    scripts,
+    docs/caffe2,
+    torch/lib/include,
+    torch/lib/tmp_install,
+    build,
+    torch/include,
+    *.pyi,
+    .git,
+    build,
+    build_test_custom_build,
+    build_code_analyzer,
+    test/generated_type_hints_smoketest.py

--- a/.gdbinit
+++ /dev/null
@@ -1,14 +0,0 @@
-# automatically load the pytoch-gdb extension.
-#
-# gdb automatically tries to load this file whenever it is executed from the
-# root of the pytorch repo, but by default it is not allowed to do so due to
-# security reasons. If you want to use pytorch-gdb, please add the following
-# line to your ~/.gdbinit (i.e., the .gdbinit file which is in your home
-# directory, NOT this file):
-#     add-auto-load-safe-path /path/to/pytorch/.gdbinit
-#
-# Alternatively, you can manually load the pytorch-gdb commands into your
-# existing gdb session by doing the following:
-#     (gdb) source /path/to/pytorch/tools/gdb/pytorch-gdb.py
-
-source tools/gdb/pytorch-gdb.py

--- a/.github/pytorch-circleci-labels.yml
+++ b/.github/pytorch-circleci-labels.yml
@@ -11,5 +11,3 @@ labels_to_circle_params:
       - v[0-9]+(\.[0-9]+)*-rc[0-9]+
     set_to_false:
       - run_build
-  ci/master:
-    parameter: run_master_build

--- a/.github/scale-config.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-# scale-config.yml:
-#   Powers what instance types are available for GHA auto-scaled
-#   runners. Runners listed here will be available as self hosted
-#   runners, configuration is directly pulled from the main branch.
-#
-# NOTE (Apr, 5, 2021): Linux runners are currently all an amazonlinux2
-#
-# TODO: Add some documentation on how the auto-scaling works
-#
-# NOTE: Default values,
-#
-# runner_types:
-#   runner_label:
-#     instance_type: m4.large
-#     os: linux
-#     max_available: 20
-#     disk_size: 50
-
-runner_types:
-  linux.2xlarge:
-    instance_type: c5.2xlarge
-    os: linux
-    max_available: 500
-    disk_size: 150
-  linux.8xlarge.nvidia.gpu:
-    instance_type: g3.8xlarge
-    os: linux
-    max_available: 50
-    disk_size: 150
-  windows.4xlarge:
-    instance_type: c5.4xlarge
-    os: windows
-    max_available: 200
-    disk_size: 256

--- a/.github/scripts/build_publish_nightly_docker.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh
-
-set -xeuo pipefail
-
-PYTORCH_DOCKER_TAG=$(git describe --tags --always)-devel
-CUDA_VERSION=11.1
-
-# Build PyTorch nightly docker
-make -f docker.Makefile \
-     DOCKER_REGISTRY=ghcr.io \
-     DOCKER_ORG=pytorch \
-     CUDA_VERSION=${CUDA_VERSION} \
-     DOCKER_IMAGE=pytorch-nightly \
-     DOCKER_TAG=${PYTORCH_DOCKER_TAG} \
-     INSTALL_CHANNEL=pytorch-nightly BUILD_TYPE=official devel-image
-
-# Get the PYTORCH_NIGHTLY_COMMIT from the docker image
-PYTORCH_NIGHTLY_COMMIT=$(docker run \
-    ghcr.io/pytorch/pytorch-nightly:${PYTORCH_DOCKER_TAG} \
-    python -c 'import torch; print(torch.version.git_version)' | head -c 7)
-
-docker tag ghcr.io/pytorch/pytorch-nightly:${PYTORCH_DOCKER_TAG} \
-           ghcr.io/pytorch/pytorch-nightly:${PYTORCH_NIGHTLY_COMMIT}-cu${CUDA_VERSION}
-
-docker tag ghcr.io/pytorch/pytorch-nightly:${PYTORCH_NIGHTLY_COMMIT}-cu${CUDA_VERSION} \
-           ghcr.io/pytorch/pytorch-nightly:latest
-
-# Push the nightly docker to GitHub Container Registry
-echo $GHCR_PAT | docker login ghcr.io -u pytorch --password-stdin
-make -f docker.Makefile \
-     DOCKER_REGISTRY=ghcr.io \
-     DOCKER_ORG=pytorch \
-     DOCKER_IMAGE=pytorch-nightly \
-     DOCKER_TAG=${PYTORCH_NIGHTLY_COMMIT}-cu${CUDA_VERSION} \
-     devel-push
-
-make -f docker.Makefile \
-     DOCKER_REGISTRY=ghcr.io \
-     DOCKER_ORG=pytorch \
-     DOCKER_IMAGE=pytorch-nightly \
-     DOCKER_TAG=latest \
-     devel-push

--- a/.github/scripts/generate_binary_build_matrix.py
+++ b/.github/scripts/generate_binary_build_matrix.py
@@ -10,13 +10,14 @@ architectures:
 * Latest ROCM
 """

-import argparse
 import json
-from typing import Dict, List
+import os
+import itertools

 CUDA_ARCHES = [
+    "10.1",
     "10.2",
-    "11.1"
+    "11.0"
 ]

 ROCM_ARCHES = [
@@ -24,17 +25,13 @@ ROCM_ARCHES = [
     "4.0"
 ]

+FULL_ARCHES = [
+    "cpu",
+    *CUDA_ARCHES,
+    *ROCM_ARCHES
+]

-def arch_type(arch_version: str) -> str:
-    if arch_version in CUDA_ARCHES:
-        return "cuda"
-    elif arch_version in ROCM_ARCHES:
-        return "rocm"
-    else:  # arch_version should always be "cpu" in this case
-        return "cpu"
-
-
-WHEEL_CONTAINER_IMAGES = {
+CONTAINER_IMAGES = {
     **{
         # TODO: Re-do manylinux CUDA image tagging scheme to be similar to
         # ROCM so we don't have to do this replacement
@@ -48,29 +45,6 @@ WHEEL_CONTAINER_IMAGES = {
     "cpu": "pytorch/manylinux-cpu"
 }

-CONDA_CONTAINER_IMAGES = {
-    **{
-        gpu_arch: f"pytorch/conda-builder:cuda{gpu_arch}"
-        for gpu_arch in CUDA_ARCHES
-    },
-    "cpu": "pytorch/conda-builder:cpu"
-}
-
-LIBTORCH_CONTAINER_IMAGES = {
-    **{
-        # TODO: Re-do manylinux CUDA image tagging scheme to be similar to
-        # ROCM so we don't have to do this replacement
-        (gpu_arch, "pre-cxx11"): f"pytorch/manylinux-cuda{gpu_arch.replace('.', '')}"
-        for gpu_arch in CUDA_ARCHES
-    },
-    **{
-        (gpu_arch, "cxx11-abi"): f"pytorch/libtorch-cxx11-builder:cuda{gpu_arch}"
-        for gpu_arch in CUDA_ARCHES
-    },
-    ("cpu", "pre-cxx11"): "pytorch/manylinux-cpu",
-    ("cpu", "cxx11-abi"): "pytorch/libtorch-cxx11-builder:cpu",
-}
-
 FULL_PYTHON_VERSIONS = [
     "3.6",
     "3.7",
@@ -79,89 +53,34 @@ FULL_PYTHON_VERSIONS = [
 ]


-def is_pull_request() -> bool:
-    return False
-    # return os.environ.get("GITHUB_HEAD_REF")
+def is_pull_request():
+    return os.environ.get("GITHUB_HEAD_REF")


-def snip_if(is_pr: bool, versions: List[str]) -> List[str]:
-    """
-    Return the full list of versions, or just the latest if on a PR.
-    """
-    return [versions[-1]] if is_pr else versions
-
-
-def generate_conda_matrix(is_pr: bool) -> List[Dict[str, str]]:
-    return [
-        {
+def generate_matrix():
+    python_versions = FULL_PYTHON_VERSIONS
+    arches = FULL_ARCHES
+    if is_pull_request():
+        python_versions = [python_versions[-1]]
+        arches = ["cpu", CUDA_ARCHES[-1], ROCM_ARCHES[-1]]
+    matrix = []
+    for item in itertools.product(python_versions, arches):
+        python_version, arch_version = item
+        # Not my favorite code here
+        gpu_arch_type = "cuda"
+        if "rocm" in CONTAINER_IMAGES[arch_version]:
+            gpu_arch_type = "rocm"
+        elif "cpu" in CONTAINER_IMAGES[arch_version]:
+            gpu_arch_type = "cpu"
+        matrix.append({
             "python_version": python_version,
-            "gpu_arch_type": arch_type(arch_version),
+            "gpu_arch_type": gpu_arch_type,
             "gpu_arch_version": arch_version,
-            "container_image": CONDA_CONTAINER_IMAGES[arch_version],
-        }
-        for python_version in snip_if(is_pr, FULL_PYTHON_VERSIONS)
-        # We don't currently build conda packages for rocm
-        for arch_version in ["cpu"] + snip_if(is_pr, CUDA_ARCHES)
-    ]
-
-
-def generate_libtorch_matrix(is_pr: bool) -> List[Dict[str, str]]:
-    libtorch_variants = [
-        "shared-with-deps",
-        "shared-without-deps",
-        "static-with-deps",
-        "static-without-deps",
-    ]
-    return [
-        {
-            "gpu_arch_type": arch_type(arch_version),
-            "gpu_arch_version": arch_version,
-            "libtorch_variant": libtorch_variant,
-            "devtoolset": abi_version,
-            "container_image": LIBTORCH_CONTAINER_IMAGES[(arch_version, abi_version)],
-        }
-        # We don't currently build libtorch for rocm
-        for arch_version in ["cpu"] + snip_if(is_pr, CUDA_ARCHES)
-        for libtorch_variant in libtorch_variants
-        # one of the values in the following list must be exactly
-        # "cxx11-abi", but the precise value of the other one doesn't
-        # matter
-        for abi_version in ["cxx11-abi", "pre-cxx11"]
-    ]
-
-
-def generate_wheels_matrix(is_pr: bool) -> List[Dict[str, str]]:
-    arches = ["cpu"]
-    arches += snip_if(is_pr, CUDA_ARCHES)
-    arches += snip_if(is_pr, ROCM_ARCHES)
-    return [
-        {
-            "python_version": python_version,
-            "gpu_arch_type": arch_type(arch_version),
-            "gpu_arch_version": arch_version,
-            "container_image": WHEEL_CONTAINER_IMAGES[arch_version],
-        }
-        for python_version in snip_if(is_pr, FULL_PYTHON_VERSIONS)
-        for arch_version in arches
-    ]
-
-
-def from_includes(includes: List[Dict[str, str]]) -> str:
-    return json.dumps({"include": includes})
-
-
-def main() -> None:
-    parser = argparse.ArgumentParser()
-    parser.add_argument('mode', choices=['conda', 'libtorch', 'wheels'])
-    args = parser.parse_args()
-
-    is_pr = is_pull_request()
-    print(from_includes({
-        'conda': generate_conda_matrix,
-        'libtorch': generate_libtorch_matrix,
-        'wheels': generate_wheels_matrix,
-    }[args.mode](is_pr)))
+            "container_image": CONTAINER_IMAGES[arch_version]
+        })
+    return json.dumps({"include": matrix})

+
+def main():
+    print(generate_matrix())
+
 if __name__ == "__main__":
     main()

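Both sides of this diff keep the same contract: the script prints a JSON object whose "include" list feeds a GitHub Actions job matrix, and it trims that list on pull requests (is_pull_request) so presubmit builds stay cheap. A minimal sketch of that contract, using made-up version lists rather than the script's real constants:

import itertools
import json

# Hypothetical stand-ins for FULL_PYTHON_VERSIONS and the arch lists.
PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
ARCHES = ["cpu", "10.2", "11.1"]

def build_matrix(is_pr: bool) -> str:
    # On a PR keep only the newest Python and the newest GPU arch (plus cpu),
    # mirroring the "last element only" trimming seen in both script versions.
    pythons = [PYTHON_VERSIONS[-1]] if is_pr else PYTHON_VERSIONS
    arches = ["cpu", ARCHES[-1]] if is_pr else ARCHES
    include = [
        {"python_version": p, "gpu_arch_version": a}
        for p, a in itertools.product(pythons, arches)
    ]
    return json.dumps({"include": include})

print(build_matrix(is_pr=True))   # 2 configurations
print(build_matrix(is_pr=False))  # 12 configurations
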
--- a/.github/scripts/generate_linux_ci_workflows.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env python3
-
-from pathlib import Path
-
-import jinja2
-
-DOCKER_REGISTRY = "308535385114.dkr.ecr.us-east-1.amazonaws.com"
-
-GITHUB_DIR = Path(__file__).parent.parent
-
-CPU_TEST_RUNNER = "linux.2xlarge"
-CUDA_TEST_RUNNER = "linux.8xlarge.nvidia.gpu"
-
-
-class PyTorchLinuxWorkflow:
-    def __init__(
-        self,
-        build_environment: str,
-        docker_image_base: str,
-        on_pull_request: bool = False,
-        enable_doc_jobs: bool = False,
-    ):
-        self.build_environment = build_environment
-        self.docker_image_base = docker_image_base
-        self.test_runner_type = CPU_TEST_RUNNER
-        self.on_pull_request = on_pull_request
-        self.enable_doc_jobs = enable_doc_jobs
-        if "cuda" in build_environment:
-            self.test_runner_type = CUDA_TEST_RUNNER
-
-    def generate_workflow_file(
-        self, workflow_template: jinja2.Template, jinja_env: jinja2.Environment
-    ) -> Path:
-        output_file_path = GITHUB_DIR.joinpath(
-            f"workflows/{self.build_environment}.yml"
-        )
-        with open(output_file_path, "w") as output_file:
-            output_file.writelines(["# @generated DO NOT EDIT MANUALLY\n"])
-            output_file.write(
-                workflow_template.render(
-                    build_environment=self.build_environment,
-                    docker_image_base=self.docker_image_base,
-                    test_runner_type=self.test_runner_type,
-                    enable_doc_jobs=self.enable_doc_jobs,
-                    on_pull_request=self.on_pull_request,
-                )
-            )
-            output_file.write('\n')
-        return output_file_path
-
-
-WORKFLOWS = [
-    PyTorchLinuxWorkflow(
-        build_environment="pytorch-linux-xenial-py3.6-gcc5.4",
-        docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
-        on_pull_request=True,
-        enable_doc_jobs=True,
-    ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-paralleltbb-linux-xenial-py3.6-gcc5.4",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-parallelnative-linux-xenial-py3.6-gcc5.4",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-pure_torch-linux-xenial-py3.6-gcc5.4",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc5.4",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-gcc7",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3.6-gcc7",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang5-asan",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-asan",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang7-onnx",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang7-onnx",
-    # ),
-    PyTorchLinuxWorkflow(
-        build_environment="pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7",
-        docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7",
-    ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-libtorch-linux-xenial-cuda11.1-cudnn8-py3.6-gcc7",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-bionic-py3.6-clang9-noarch",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.6-clang9",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-xla-linux-bionic-py3.6-clang9",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.6-clang9",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-vulkan-linux-bionic-py3.6-clang9",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.6-clang9",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-bionic-py3.8-gcc9-coverage",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-py3.8-gcc9",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-bionic-rocm3.9-py3.6",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-bionic-rocm3.9-py3.6",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-x86_32",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-x86_64",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-arm-v7a",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang5-android-ndk-r19c-arm-v8a",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang5-mobile",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-asan",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang5-mobile-custom-dynamic",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang5-mobile-custom-static",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
-    # ),
-    # PyTorchLinuxWorkflow(
-    #     build_environment="pytorch-linux-xenial-py3.6-clang5-mobile-code-analysis",
-    #     docker_image_base=f"{DOCKER_REGISTRY}/pytorch/pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
-    # ),
-]
-
-if __name__ == "__main__":
-    jinja_env = jinja2.Environment(
-        variable_start_string="!{{",
-        loader=jinja2.FileSystemLoader(str(GITHUB_DIR.joinpath("templates"))),
-    )
-    workflow_template = jinja_env.get_template("linux_ci_workflow.yml.in")
-    for workflow in WORKFLOWS:
-        print(
-            workflow.generate_workflow_file(
-                workflow_template=workflow_template,
-                jinja_env=jinja_env
-            )
-        )

--- a/.github/scripts/install_nvidia_utils_linux.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env bash
-
-set -eou pipefail
-
-DISTRIBUTION=$(. /etc/os-release;echo $ID$VERSION_ID) \
-DRIVER_FN="NVIDIA-Linux-x86_64-460.39.run"
-YUM_REPO_URL="https://nvidia.github.io/nvidia-docker/${DISTRIBUTION}/nvidia-docker.repo"
-
-install_nvidia_docker2_amzn2() {
-    (
-        set -x
-        # Needed for yum-config-manager
-        sudo yum install -y yum-utils
-        sudo yum-config-manager --add-repo "${YUM_REPO_URL}"
-        sudo yum install -y nvidia-docker2
-        sudo systemctl restart docker
-    )
-}
-
-install_nvidia_driver_amzn2() {
-    (
-        set -x
-        sudo yum groupinstall -y "Development Tools"
-        # ensure our kernel install is the same as our underlying kernel,
-        # groupinstall "Development Tools" has a habit of mismatching kernel headers
-        sudo yum install -y "kernel-devel-uname-r == $(uname -r)"
-        sudo curl -fsL -o /tmp/nvidia_driver "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
-        sudo /bin/bash /tmp/nvidia_driver -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
-        sudo rm -fv /tmp/nvidia_driver
-        nvidia-smi
-    )
-}
-
-# Install container toolkit based on distribution
-echo "== Installing nvidia container toolkit for ${DISTRIBUTION} =="
-case "${DISTRIBUTION}" in
-    amzn*)
-        install_nvidia_docker2_amzn2
-        ;;
-    *)
-        echo "ERROR: Unknown distribution ${DISTRIBUTION}"
-        exit 1
-        ;;
-esac
-
-echo "== Installing nvidia driver ${DRIVER_FN} =="
-case "${DISTRIBUTION}" in
-    amzn*)
-        install_nvidia_driver_amzn2
-        ;;
-    *)
-        echo "ERROR: Unknown distribution ${DISTRIBUTION}"
-        exit 1
-        ;;
-esac

--- a/.github/scripts/lint_native_functions.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python3
-'''
-Verify that it is possible to round-trip native_functions.yaml via ruamel under some
-configuration. Keeping native_functions.yaml consistent in this way allows us to
-run codemods on the file using ruamel without introducing line noise. Note that we don't
-want to normalize the YAML file, as that would to lots of spurious lint failures. Anything
-that ruamel understands how to roundtrip, e.g., whitespace and comments, is OK!
-
-ruamel is a bit picky about inconsistent indentation, so you will have to indent your
-file properly. Also, if you are working on changing the syntax of native_functions.yaml,
-you may find that you want to use some format that is not what ruamel prefers. If so,
-it is OK to modify this script (instead of reformatting native_functions.yaml)--the point
-is simply to make sure that there is *some* configuration of ruamel that can round trip
-the YAML, not to be prescriptive about it.
-'''
-
-import ruamel.yaml
-import difflib
-import sys
-from pathlib import Path
-from io import StringIO
-
-def fn(base):
-    return str(base / Path("aten/src/ATen/native/native_functions.yaml"))
-
-with open(Path(__file__).parent.parent.parent / fn('.'), "r") as f:
-    contents = f.read()
-
-yaml = ruamel.yaml.YAML()
-yaml.preserve_quotes = True
-yaml.width = 1000
-yaml.boolean_representation = ['False', 'True']
-r = yaml.load(contents)
-
-# Cuz ruamel's author intentionally didn't include conversion to string
-# https://stackoverflow.com/questions/47614862/best-way-to-use-ruamel-yaml-to-dump-to-string-not-to-stream
-string_stream = StringIO()
-yaml.dump(r, string_stream)
-new_contents = string_stream.getvalue()
-string_stream.close()
-
-if contents != new_contents:
-    print("""\
-
-## LINT FAILURE: native_functions.yaml ##
-
-native_functions.yaml failed lint; please apply the diff below to fix lint.
-If you think this is in error, please see .github/scripts/lint_native_functions.py
-""", file=sys.stderr)
-    sys.stdout.writelines(difflib.unified_diff(contents.splitlines(True), new_contents.splitlines(True), fn('a'), fn('b')))
-    sys.exit(1)

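The idea behind the removed lint is small enough to sketch: load the YAML with ruamel under fixed settings, dump it back, and flag the file if the bytes differ. This is a sketch only; the real script prints a unified diff and exits non-zero instead of returning a bool, and the sample document here is made up.

import ruamel.yaml
from io import StringIO

def round_trips(text: str) -> bool:
    # Same knobs the deleted script set before loading native_functions.yaml.
    yaml = ruamel.yaml.YAML()
    yaml.preserve_quotes = True
    yaml.width = 1000
    out = StringIO()
    yaml.dump(yaml.load(text), out)
    return out.getvalue() == text

print(round_trips("- func: add\n  variants: function\n"))
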
--- a/.github/scripts/parse_ref.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import re
-
-
-def main() -> None:
-    ref = os.environ['GITHUB_REF']
-    m = re.match(r'^refs/(\w+)/(.*)$', ref)
-    if m:
-        category, stripped = m.groups()
-        if category == 'heads':
-            print(f'::set-output name=branch::{stripped}')
-        elif category == 'pull':
-            print(f'::set-output name=branch::pull/{stripped.split("/")[0]}')
-        elif category == 'tags':
-            print(f'::set-output name=tag::{stripped}')
-
-
-if __name__ == '__main__':
-    main()

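For reference, the behavior the removed parse_ref.py implemented, re-run on a few example refs (the ref values below are illustrative, not taken from any real run):

import re

def outputs(ref: str) -> str:
    # Same regex and branching as the deleted script, returned instead of printed.
    m = re.match(r'^refs/(\w+)/(.*)$', ref)
    if not m:
        return ''
    category, stripped = m.groups()
    if category == 'heads':
        return f'::set-output name=branch::{stripped}'
    elif category == 'pull':
        return f'::set-output name=branch::pull/{stripped.split("/")[0]}'
    elif category == 'tags':
        return f'::set-output name=tag::{stripped}'
    return ''

print(outputs('refs/heads/master'))      # ::set-output name=branch::master
print(outputs('refs/pull/12345/merge'))  # ::set-output name=branch::pull/12345
print(outputs('refs/tags/v1.8.1-rc1'))   # ::set-output name=tag::v1.8.1-rc1
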
@@ -1,37 +0,0 @@
-#!/usr/bin/env python3
-'''
-This file verifies that the workflows that are potentially canceled in our cancel_redundant_workflow.yml
-match the workflows we have running on pull requests (found in .github/workflows). This way, anytime a
-workflow is added or removed, people can be reminded to modify the cancel_redundant_workflow.yml accordingly.
-'''
-
-
-import ruamel.yaml
-from pathlib import Path
-
-
-yaml = ruamel.yaml.YAML()
-yaml.preserve_quotes = True
-yaml.boolean_representation = ['False', 'True']
-yaml.default_flow_style = False
-
-
-if __name__ == '__main__':
-    workflow_paths = (Path(__file__).parent.parent / 'workflows').rglob('*')
-    workflows = []
-    for path in workflow_paths:
-        if path.suffix in {'.yml', '.yaml'}:
-            with open(path) as f:
-                data = yaml.load(f)
-                assert 'name' in data, 'Every GHA workflow must have a name.'
-                if 'pull_request' in data['on']:
-                    workflows.append(data['name'])
-
-    with open('.github/workflows/cancel_redundant_workflows.yml', 'r') as f:
-        data = yaml.load(f)
-
-    # Replace workflows to cancel
-    data['on']['workflow_run']['workflows'] = sorted(workflows)
-
-    with open('.github/workflows/cancel_redundant_workflows.yml', 'w') as f:
-        yaml.dump(data, f)

--- a/.github/scripts/report_git_status.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-CHANGES=$(git status --porcelain)
-echo "$CHANGES"
-git diff
-[ -z "$CHANGES" ]

--- a/.github/scripts/run_torchbench.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""
-Generate a torchbench test report from a file containing the PR body.
-Currently, only supports running tests on specified model names
-
-Testing environment:
-- Intel Xeon 8259CL @ 2.50 GHz, 24 Cores with disabled Turbo and HT
-- Nvidia Tesla T4
-- Nvidia Driver 450.51.06
-- Python 3.7
-- CUDA 10.2
-"""
-# Known issues:
-# 1. Does not reuse the build artifact in other CI workflows
-# 2. CI jobs are serialized because there is only one worker
-import os
-import pathlib
-import argparse
-import subprocess
-
-from typing import List
-
-CUDA_VERSION = "cu102"
-PYTHON_VERSION = "3.7"
-TORCHBENCH_CONFIG_NAME = "config.yaml"
-MAGIC_PREFIX = "RUN_TORCHBENCH:"
-ABTEST_CONFIG_TEMPLATE = """# This config is automatically generated by run_torchbench.py
-start: {control}
-end: {treatment}
-threshold: 100
-direction: decrease
-timeout: 720
-tests:"""
-
-def gen_abtest_config(control: str, treatment: str, models: List[str]):
-    d = {}
-    d["control"] = control
-    d["treatment"] = treatment
-    config = ABTEST_CONFIG_TEMPLATE.format(**d)
-    if models == ["ALL"]:
-        return config + "\n"
-    for model in models:
-        config = f"{config}\n - {model}"
-    config = config + "\n"
-    return config
-
-def deploy_torchbench_config(output_dir: str, config: str):
-    # Create test dir if needed
-    pathlib.Path(output_dir).mkdir(exist_ok=True)
-    # TorchBench config file name
-    config_path = os.path.join(output_dir, TORCHBENCH_CONFIG_NAME)
-    with open(config_path, "w") as fp:
-        fp.write(config)
-
-def extract_models_from_pr(torchbench_path: str, prbody_file: str) -> List[str]:
-    model_list = []
-    with open(prbody_file, "r") as pf:
-        lines = map(lambda x: x.strip(), pf.read().splitlines())
-        magic_lines = list(filter(lambda x: x.startswith(MAGIC_PREFIX), lines))
-        if magic_lines:
-            # Only the first magic line will be respected.
-            model_list = list(map(lambda x: x.strip(), magic_lines[0][len(MAGIC_PREFIX):].split(",")))
-    # Shortcut: if model_list is ["ALL"], run all the tests
-    if model_list == ["ALL"]:
-        return model_list
-    # Sanity check: make sure all the user specified models exist in torchbench repository
-    benchmark_path = os.path.join(torchbench_path, "torchbenchmark", "models")
-    full_model_list = [model for model in os.listdir(benchmark_path) if os.path.isdir(os.path.join(benchmark_path, model))]
-    for m in model_list:
-        if m not in full_model_list:
-            print(f"The model {m} you specified does not exist in TorchBench suite. Please double check.")
-            return []
-    return model_list
-
-def run_torchbench(pytorch_path: str, torchbench_path: str, output_dir: str):
-    # Copy system environment so that we will not override
-    env = dict(os.environ)
-    command = ["python", "bisection.py", "--work-dir", output_dir,
-               "--pytorch-src", pytorch_path, "--torchbench-src", torchbench_path,
-               "--config", os.path.join(output_dir, "config.yaml"),
-               "--output", os.path.join(output_dir, "result.txt")]
-    subprocess.check_call(command, cwd=torchbench_path, env=env)
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Run TorchBench tests based on PR')
-    parser.add_argument('--pr-num', required=True, type=str, help="The Pull Request number")
-    parser.add_argument('--pr-base-sha', required=True, type=str, help="The Pull Request base hash")
-    parser.add_argument('--pr-head-sha', required=True, type=str, help="The Pull Request head hash")
-    parser.add_argument('--pr-body', required=True, help="The file that contains body of a Pull Request")
-    parser.add_argument('--pytorch-path', required=True, type=str, help="Path to pytorch repository")
-    parser.add_argument('--torchbench-path', required=True, type=str, help="Path to TorchBench repository")
-    args = parser.parse_args()
-
-    output_dir: str = os.path.join(os.environ["HOME"], ".torchbench", "bisection", f"pr{args.pr_num}")
-    # Identify the specified models and verify the input
-    models = extract_models_from_pr(args.torchbench_path, args.pr_body)
-    if not models:
-        print("Can't parse the model filter from the pr body. Currently we only support allow-list.")
-        exit(1)
-    print(f"Ready to run TorchBench with benchmark. Result will be saved in the directory: {output_dir}.")
-    # Run TorchBench with the generated config
-    torchbench_config = gen_abtest_config(args.pr_base_sha, args.pr_head_sha, models)
-    deploy_torchbench_config(output_dir, torchbench_config)
-    run_torchbench(pytorch_path=args.pytorch_path, torchbench_path=args.torchbench_path, output_dir=output_dir)

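The script keys off a single magic line in the PR body. A sketch of how that line is extracted, mirroring extract_models_from_pr above (the PR text and model names are made up):

MAGIC_PREFIX = "RUN_TORCHBENCH:"

body = """Speed up depthwise convolution

RUN_TORCHBENCH: resnet50, BERT_pytorch
"""

lines = [line.strip() for line in body.splitlines()]
magic = [line for line in lines if line.startswith(MAGIC_PREFIX)]
if magic:
    # Only the first magic line is respected, matching the script.
    models = [m.strip() for m in magic[0][len(MAGIC_PREFIX):].split(",")]
    print(models)  # ['resnet50', 'BERT_pytorch']
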
--- a/.github/templates/linux_ci_workflow.yml.in
+++ /dev/null
@@ -1,368 +0,0 @@
-# Template is at:    .github/templates/linux_ci_workflow.yml
-# Generation script: .github/scripts/generate_linux_ci_workflows.py
-name: Linux CI (!{{ build_environment }})
-
-on:
-  # TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
-{%- if on_pull_request %}
-  pull_request:
-{%- endif %}
-  push:
-    branches:
-      - master
-      - release/*
-  workflow_dispatch:
-
-env:
-  BUILD_ENVIRONMENT: !{{ build_environment }}
-  DOCKER_IMAGE_BASE: !{{ docker_image_base }}
-  SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
-  TORCH_CUDA_ARCH_LIST: 5.2
-  IN_CI: 1
-  # Used for custom_opertor, jit_hooks, custom_backend, see .jenkins/pytorch/build.sh
-  CUSTOM_TEST_ARTIFACT_BUILD_DIR: build/custom_test_artifacts
-  ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
-
-jobs:
-  calculate-docker-image:
-    runs-on: linux.2xlarge
-    env:
-      DOCKER_BUILDKIT: 1
-    timeout-minutes: 90
-    outputs:
-      docker_image: ${{ steps.calculate-tag.outputs.docker_image }}
-    steps:
-      - name: Checkout PyTorch
-        uses: actions/checkout@v2
-        with:
-          # deep clone, to allow use of git merge-base
-          fetch-depth: 0
-      - name: Calculate docker image tag
-        id: calculate-tag
-        run: |
-          DOCKER_TAG=$(git rev-parse HEAD:.circleci/docker)
-          echo "::set-output name=docker_tag::${DOCKER_TAG}"
-          echo "::set-output name=docker_image::${DOCKER_IMAGE_BASE}:${DOCKER_TAG}"
-      - name: Check if image should be built
-        id: check
-        env:
-          DOCKER_TAG: ${{ steps.calculate-tag.outputs.docker_tag }}
-          BASE_REVISION: ${{ github.event.pull_request.base.sha || github.sha }}
-        run: |
-          eval "$(aws ecr get-login --no-include-email --region us-east-1)"
-          set -x
-          # Check if image already exists, if it does then skip building it
-          if docker manifest inspect "${DOCKER_IMAGE_BASE}:${DOCKER_TAG}"; then
-            exit 0
-          fi
-          if [[ "$BASE_REVISION" = "$(git rev-parse HEAD)" ]]; then
-            # if we're on the base branch then use the parent commit
-            MERGE_BASE=$(git rev-parse HEAD~)
-          else
-            # otherwise we're on a PR, so use the most recent base commit
-            MERGE_BASE=$(git merge-base HEAD "$BASE_REVISION")
-          fi
-          # Covers the case where a previous tag doesn't exist for the tree
-          # this is only really applicable on trees that don't have `.circleci/docker` at its merge base, i.e. nightly
-          if ! git rev-parse "$MERGE_BASE:.circleci/docker"; then
-            echo "Directory '.circleci/docker' not found in commit $MERGE_BASE, you should probably rebase onto a more recent commit"
-            exit 1
-          fi
-          PREVIOUS_DOCKER_TAG=$(git rev-parse "$MERGE_BASE:.circleci/docker")
-          # If no image exists but the hash is the same as the previous hash then we should error out here
-          if [[ "${PREVIOUS_DOCKER_TAG}" = "${DOCKER_TAG}" ]]; then
-            echo "ERROR: Something has gone wrong and the previous image isn't available for the merge-base of your branch"
-            echo "       contact the PyTorch team to restore the original images"
-            exit 1
-          fi
-          echo ::set-output name=rebuild::yes
-      - name: Build and push docker image
-        if: steps.check.outputs.rebuild
-        env:
-          DOCKER_TAG: ${{ steps.calculate-tag.outputs.docker_tag }}
-          DOCKER_SKIP_S3_UPLOAD: 1
-        run: |
-          export IMAGE_NAME=${DOCKER_IMAGE_BASE#308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/}
-          cd .circleci/docker && ./build_docker.sh
-  build:
-    runs-on: linux.2xlarge
-    needs: calculate-docker-image
-    env:
-      DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
-    steps:
-      - name: Log in to ECR
-        run: |
-          aws ecr get-login --no-include-email --region us-east-1 > /tmp/ecr-login.sh
-          bash /tmp/ecr-login.sh
-          rm /tmp/ecr-login.sh
-      - name: Chown workspace
-        run: |
-          # Ensure the working directory gets chowned back to the current user
-          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
-      - name: Checkout PyTorch
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0 # deep clone, to allow sharding to use git rev-list
-          submodules: recursive
-      - name: Pull docker image
-        run: |
-          docker pull "${DOCKER_IMAGE}"
-      - name: Preserve github env variables for use in docker
-        run: |
-          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
-      - name: Build PyTorch
-        run: |
-          docker run \
-            -e BUILD_ENVIRONMENT \
-            -e MAX_JOBS="$(nproc --ignore=2)" \
-            -e SCCACHE_BUCKET \
-            -e CUSTOM_TEST_ARTIFACT_BUILD_DIR \
-            -e SKIP_SCCACHE_INITIALIZATION=1 \
-            -e TORCH_CUDA_ARCH_LIST \
-            --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
-            --security-opt seccomp=unconfined \
-            --cap-add=SYS_PTRACE \
-            --tty \
-            --user jenkins \
-            -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
-            -w /var/lib/jenkins/workspace \
-            "${DOCKER_IMAGE}" \
-            sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh'
-      - name: Chown workspace
-        run: |
-          # Ensure the working directory gets chowned back to the current user
-          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
-      - name: Archive artifacts into zip
-        run: |
-          zip -r artifacts.zip dist/ build/
-      - uses: actions/upload-artifact@v2
-        name: Store PyTorch Build Artifacts
-        with:
-          name: ${{ env.BUILD_ENVIRONMENT }}
-          retention-days: 30
-          if-no-files-found: error
-          path:
-            artifacts.zip
-      - name: Clean up docker images
-        if: always()
-        run: |
-          # Prune all of the docker images
-          docker system prune -af
-  test:
-    runs-on: !{{ test_runner_type }}
-    needs:
-      - calculate-docker-image
-      - build
-    env:
-      DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
-    steps:
-      - name: Log in to ECR
-        run: |
-          aws ecr get-login --no-include-email --region us-east-1 > /tmp/ecr-login.sh
-          bash /tmp/ecr-login.sh
-          rm /tmp/ecr-login.sh
-      - name: Chown workspace
-        run: |
-          # Ensure the working directory gets chowned back to the current user
-          docker run --rm -v "$(pwd)/../":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
-      - name: Checkout PyTorch
-        uses: actions/checkout@v2
-      - name: Pull docker image
-        run: |
-          docker pull "${DOCKER_IMAGE}"
-      - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
-        if: ${{ contains(env.BUILD_ENVIRONMENT, 'cuda') }}
-        run: |
-          bash .github/scripts/install_nvidia_utils_linux.sh
-          echo "GPU_FLAG=--gpus all" >> "${GITHUB_ENV}"
-      - name: Determine shm-size
-        run: |
-          shm_size="1g"
-          case "${BUILD_ENVIRONMENT}" in
-            *cuda*)
-              shm_size="2g"
-              ;;
-            *rocm*)
-              shm_size="8g"
-              ;;
-          esac
-          echo "SHM_SIZE=${shm_size}" >> "${GITHUB_ENV}"
-      - uses: actions/download-artifact@v2
-        name: Download PyTorch Build Artifacts
-        with:
-          name: ${{ env.BUILD_ENVIRONMENT }}
-      - name: Unzip artifacts
-        run: |
-          unzip -o artifacts.zip
-      - name: Output disk space left
-        run: |
-          sudo df -H
-      - name: Preserve github env variables for use in docker
-        run: |
-          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
-      - name: Test PyTorch
-        run: |
-          # TODO: Stop building test binaries as part of the build phase
-          # Used for GPU_FLAG since that doesn't play nice
-          # shellcheck disable=SC2086
-          docker run \
-            ${GPU_FLAG:-} \
-            -e BUILD_ENVIRONMENT \
-            -e CUSTOM_TEST_ARTIFACT_BUILD_DIR \
-            -e IN_CI \
-            -e MAX_JOBS="$(nproc --ignore=2)" \
-            --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
-            --security-opt seccomp=unconfined \
-            --cap-add=SYS_PTRACE \
-            --shm-size="${SHM_SIZE}" \
-            --tty \
-            --user jenkins \
-            -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
-            -w /var/lib/jenkins/workspace \
-            "${DOCKER_IMAGE}" \
-            sh -c 'sudo chown -R jenkins . && pip install dist/*.whl && .jenkins/pytorch/test.sh'
-      - name: Chown workspace
-        if: always()
-        run: |
-          # Ensure the working directory gets chowned back to the current user
-          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
-      - uses: actions/upload-artifact@v2
-        name: Store PyTorch Test Reports
-        if: always()
-        with:
-          name: test-reports
-          retention-days: 30
-          if-no-files-found: error
-          path:
-            test/**/*.xml
-      - name: Clean up docker images
-        if: always()
-        run: |
-          # Ensure the working directory gets chowned back to the current user
|
|
||||||
docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
|
|
||||||
# Prune all of the docker images
|
|
||||||
docker system prune -af
|
|
||||||
render_test_results:
|
|
||||||
if: always()
|
|
||||||
needs:
|
|
||||||
- test
|
|
||||||
runs-on: ubuntu-18.04
|
|
||||||
steps:
|
|
||||||
- name: Checkout PyTorch
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
# deep clone, to allow tools/print_test_stats.py to use Git commands
|
|
||||||
fetch-depth: 0
|
|
||||||
- uses: actions/download-artifact@v2
|
|
||||||
name: Download PyTorch Test Reports
|
|
||||||
with:
|
|
||||||
name: test-reports
|
|
||||||
path: test/test-reports
|
|
||||||
- uses: actions/setup-python@v2
|
|
||||||
with:
|
|
||||||
python-version: 3.9
|
|
||||||
- name: Install dependencies
|
|
||||||
# boto3 version copied from .circleci/docker/common/install_conda.sh
|
|
||||||
run: |
|
|
||||||
pip install -r requirements.txt
|
|
||||||
pip install boto3==1.16.34 junitparser rich
|
|
||||||
- name: Output Test Results (Click Me)
|
|
||||||
run: |
|
|
||||||
python tools/render_junit.py test
|
|
||||||
- name: Parse ref
|
|
||||||
id: parse-ref
|
|
||||||
run: .github/scripts/parse_ref.py
|
|
||||||
- name: Display and upload test statistics (Click Me)
|
|
||||||
# temporary hack: set CIRCLE_* vars, until we update
|
|
||||||
# tools/print_test_stats.py to natively support GitHub Actions
|
|
||||||
env:
|
|
||||||
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
|
|
||||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
|
|
||||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_OSSCI_METRICS_SECRET_ACCESS_KEY }}
|
|
||||||
CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
|
|
||||||
CIRCLE_JOB: !{{ build_environment }}
|
|
||||||
CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
|
|
||||||
CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
|
|
||||||
CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}
|
|
||||||
CIRCLE_WORKFLOW_ID: ${{ github.run_id }} # dunno if this corresponds
|
|
||||||
run: |
|
|
||||||
export PYTHONPATH=$PWD
|
|
||||||
python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test
|
|
||||||
{%- if enable_doc_jobs %}
|
|
||||||
pytorch_python_doc_build:
|
|
||||||
runs-on: linux.2xlarge
|
|
||||||
needs:
|
|
||||||
- calculate-docker-image
|
|
||||||
- build
|
|
||||||
env:
|
|
||||||
DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
|
|
||||||
steps:
|
|
||||||
- name: Log in to ECR
|
|
||||||
run: |
|
|
||||||
aws ecr get-login --no-include-email --region us-east-1 > /tmp/ecr-login.sh
|
|
||||||
bash /tmp/ecr-login.sh
|
|
||||||
rm /tmp/ecr-login.sh
|
|
||||||
- name: Chown workspace
|
|
||||||
run: |
|
|
||||||
# Ensure the working directory gets chowned back to the current user
|
|
||||||
docker run --rm -v "$(pwd)":/v -w /v alpine chown -R "$(id -u):$(id -g)" .
|
|
||||||
- name: Checkout PyTorch
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
fetch-depth: 0 # deep clone, to allow sharding to use git rev-list
|
|
||||||
submodules: recursive
|
|
||||||
- name: Pull docker image
|
|
||||||
run: |
|
|
||||||
docker pull "${DOCKER_IMAGE}"
|
|
||||||
- name: Preserve github env variables for use in docker
|
|
||||||
run: |
|
|
||||||
env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
|
|
||||||
- uses: actions/download-artifact@v2
|
|
||||||
name: Download PyTorch Build Artifacts
|
|
||||||
with:
|
|
||||||
name: ${{ env.BUILD_ENVIRONMENT }}
|
|
||||||
- name: Unzip artifacts
|
|
||||||
run: |
|
|
||||||
unzip -o artifacts.zip
|
|
||||||
- name: Build Python Doc in Docker
|
|
||||||
run: |
|
|
||||||
set -ex
|
|
||||||
time docker pull "${DOCKER_IMAGE}" > /dev/null
|
|
||||||
echo "${GITHUB_REF}"
|
|
||||||
ref=${GITHUB_REF##*/}
|
|
||||||
target=${ref//v}
|
|
||||||
docker run \
|
|
||||||
-e BUILD_ENVIRONMENT \
|
|
||||||
-e CUSTOM_TEST_ARTIFACT_BUILD_DIR \
|
|
||||||
-e IN_CI \
|
|
||||||
-e MAX_JOBS="$(nproc --ignore=2)" \
|
|
||||||
-e CIRCLE_SHA1="$GITHUB_SHA" \
|
|
||||||
--env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
|
|
||||||
--security-opt seccomp=unconfined \
|
|
||||||
--cap-add=SYS_PTRACE \
|
|
||||||
--name="$GITHUB_SHA" \
|
|
||||||
--tty \
|
|
||||||
--user jenkins \
|
|
||||||
-v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
|
|
||||||
-w /var/lib/jenkins/workspace \
|
|
||||||
"${DOCKER_IMAGE}" \
|
|
||||||
bash -c "sudo chown -R jenkins . && pip install dist/*.whl && ./.circleci/scripts/python_doc_push_script.sh docs/$target $target site"
|
|
||||||
- name: Chown workspace
|
|
||||||
run: |
|
|
||||||
# Ensure the working directory gets chowned back to the current user
|
|
||||||
docker run --rm -v "$(pwd)":/v -w /v alpine chown -R "$(id -u):$(id -g)" .
|
|
||||||
- name: Archive artifacts into zip
|
|
||||||
run: |
|
|
||||||
zip -r pytorch_github_io.zip "${GITHUB_WORKSPACE}/pytorch.github.io"
|
|
||||||
- uses: actions/upload-artifact@v2
|
|
||||||
name: Store PyTorch Build Artifacts
|
|
||||||
with:
|
|
||||||
name: pytorch_github_io
|
|
||||||
if-no-files-found: error
|
|
||||||
path: pytorch_github_io.zip
|
|
||||||
- name: Clean up docker images
|
|
||||||
if: always()
|
|
||||||
run: |
|
|
||||||
# Prune all of the docker images
|
|
||||||
docker system prune -af
|
|
||||||
{%- endif -%}
|
|
||||||
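
The `!{{ ... }}` placeholders above are template variables, deliberately distinct from GitHub's own `${{ ... }}` expressions; the `templates` lint job shown later in this diff regenerates the workflows from this template and fails if a generated file has drifted. A minimal sketch of that round-trip, assuming the generation script takes no arguments beyond what the lint job shows:

    # Re-render .github/workflows/*.yml from .github/templates/linux_ci_workflow.yml
    .github/scripts/generate_linux_ci_workflows.py
    # Fail if a generated workflow was edited by hand instead of via the template
    git diff --exit-code .github/workflows/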
.github/workflows/add_annotations.yml  (66 lines, vendored)
@@ -1,66 +0,0 @@
name: Add annotations

on:
  workflow_run:
    types:
      - completed
    workflows:
      - Lint

jobs:
  annotate:
    strategy:
      fail-fast: false
      matrix:
        name:
          - flake8-py3
          - clang-tidy
    runs-on: ubuntu-18.04
    steps:
      - name: Download artifact
        uses: actions/github-script@v3
        env:
          RUN_ID: ${{ github.event.workflow_run.id }}
          LINT_NAME: ${{ matrix.name }}
        with:
          # https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
          script: |
            const artifacts = await github.actions.listWorkflowRunArtifacts({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: process.env.RUN_ID,
            });
            const filteredArtifacts = artifacts.data.artifacts.filter(artifact => {
              return artifact.name == process.env.LINT_NAME;
            });
            if (filteredArtifacts.length > 0) {
              const matchArtifact = filteredArtifacts[0];
              const download = await github.actions.downloadArtifact({
                owner: context.repo.owner,
                repo: context.repo.repo,
                artifact_id: matchArtifact.id,
                archive_format: 'zip',
              });
              const fs = require('fs');
              fs.writeFileSync(
                `${process.env.GITHUB_WORKSPACE}/linter-output.zip`,
                Buffer.from(download.data),
              );
            }
      - name: Unzip artifact
        id: unzip
        run: |
          if unzip linter-output.zip annotations.json commit-sha.txt; then
            echo ::set-output \
              name=sha::"$(grep -Em1 '^[[:xdigit:]]{40}$' commit-sha.txt)"
          fi
      - if: steps.unzip.outputs.sha
        name: Add annotations
        uses: pytorch/add-annotations-github-action@master
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          check_name: ${{ matrix.name }}
          linter_output_path: annotations.json
          commit_sha: ${{ steps.unzip.outputs.sha }}
          mode: json
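
For context, the artifact this workflow unpacks is produced by the Lint workflow (modified later in this diff): each linter job uploads an `annotations.json` plus a `commit-sha.txt` pinning the PR head, so this privileged `workflow_run` job can attach annotations without trusting fork code. A sketch of the producer side, with paths and flags taken from the flake8 job shown below:

    mkdir flake8-output
    echo "$HEAD_SHA" > flake8-output/commit-sha.txt   # pins which commit the annotations attach to
    tools/translate_annotations.py \
      --file="${GITHUB_WORKSPACE}"/flake8-output.txt \
      --regex='^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)' \
      --commit="$HEAD_SHA" \
      > flake8-output/annotations.json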
.github/workflows/auto_label.yml  (47 lines, vendored)
@@ -1,47 +0,0 @@
name: Label PRs & Issues

on:
  issues:
    types: [opened, edited]
  pull_request_target:
    types: [edited, opened, synchronize, reopened]

jobs:
  auto-label-rocm:
    runs-on: ubuntu-18.04
    steps:
      - name: Retrieve information
        id: vars
        env:
          EVENT_NAME: ${{ github.event_name }}
          PR_TITLE: ${{ github.event.pull_request.title }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          ISSUE_TITLE: ${{ github.event.issue.title }}
          ISSUE_NUMBER: ${{ github.event.issue.number }}
        run: |
          set -eux
          if [[ "$EVENT_NAME" == "pull_request_target" ]]; then
            TITLE="${PR_TITLE}"
            ISSUE_NUMBER="${PR_NUMBER}"
          else
            TITLE="${ISSUE_TITLE}"
            # ISSUE_NUMBER is already set
          fi
          echo ::set-output name=TITLE::"${TITLE}"
          echo ::set-output name=ISSUE_NUMBER::"${ISSUE_NUMBER}"
      - name: Auto-label ROCm
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TITLE: ${{ steps.vars.outputs.TITLE }}
          ISSUE_NUMBER: ${{ steps.vars.outputs.ISSUE_NUMBER }}
          OWNER: ${{ github.repository_owner }}
          REPO: ${{ github.event.repository.name }}
        run: |
          set -eux
          if [[ "${TITLE,,}" == *rocm* ]]; then
            curl \
              -X POST \
              -H "Authorization: token ${GITHUB_TOKEN}" \
              "https://api.github.com/repos/${OWNER}/${REPO}/issues/${ISSUE_NUMBER}/labels" \
              -d '{"labels":["module: rocm"]}'
          fi
@@ -21,8 +21,8 @@ jobs:
       id: set-matrix
       run: |
         # outputting for debugging purposes
-        MATRIX=$(python .github/scripts/generate_binary_build_matrix.py wheels)
-        echo "${MATRIX}"
+        python .github/scripts/generate_binary_build_matrix.py
+        MATRIX=$(python .github/scripts/generate_binary_build_matrix.py)
         echo "::set-output name=matrix::${MATRIX}"
   build-wheel:
     if: ${{ github.repository_owner == 'pytorch' }}
@@ -31,7 +31,6 @@ jobs:
     strategy:
       matrix:
         ${{ fromJson(needs.generate-build-matrix.outputs.matrix) }}
-      fail-fast: false
     container:
       image: ${{ matrix.container_image }}
     env:
@@ -44,8 +43,6 @@ jobs:
       PYTORCH_BUILD_NUMBER: 1
       SKIP_ALL_TESTS: 1
     steps:
-      - name: Clean runner workspace
-        run: rm -rf "$GITHUB_WORKSPACE"
       - name: Clone pytorch/pytorch
         uses: actions/checkout@v2
         with:
@@ -61,17 +58,13 @@ jobs:
         run: |
           version=$(.github/scripts/generate_pytorch_version.py)
           echo "Generated version: ${version}"
-          echo "PYTORCH_BUILD_VERSION=${version}" >> "$GITHUB_ENV"
-      - name: Set BUILD_SPLIT_CUDA
-        if: ${{ matrix.gpu_arch_type == 'cuda' && matrix.gpu_arch_version == '11.1' }}
-        run: |
-          echo "BUILD_SPLIT_CUDA=1" >> "$GITHUB_ENV"
+          echo "PYTORCH_BUILD_VERSION=${version}" >> $GITHUB_ENV
       # TODO: Remove this once we remove the need for the directories to be
       # in specific locations
       - name: Symlink repositories to root directory (for legacy scripts purposes)
         run: |
-          ln -s "$PWD"/pytorch /pytorch
-          ln -s "$PWD"/builder /builder
+          ln -s $(pwd)/pytorch /pytorch
+          ln -s $(pwd)/builder /builder
       # TODO: Bundle the correct build script in the base container image so
       # that we don't have to do this type of specification
       - name: Build PyTorch binary (CUDA specific)
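
The matrix handoff above works by serializing JSON through a step output: `fromJson(needs.generate-build-matrix.outputs.matrix)` in the consuming job expands whatever the generator printed. A sketch of the shape, with field names taken from the `matrix.*` references in these workflows (the concrete values are illustrative, not the script's confirmed output):

    MATRIX=$(python .github/scripts/generate_binary_build_matrix.py)
    # e.g. {"include": [{"python_version": "3.8", "gpu_arch_type": "cuda",
    #                    "gpu_arch_version": "11.1", "container_image": "..."}]}
    echo "::set-output name=matrix::${MATRIX}"   # read back via needs.<job>.outputs.matrix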
.github/workflows/build_linux_conda.yml  (95 lines, vendored)
@@ -1,95 +0,0 @@
name: Build Linux Conda Packages

on:
  # TODO: These are only runnable from workflow_dispatch, we need to eventually add
  #       a cron
  # TODO: Add an on_release trigger to build on tags
  workflow_dispatch:

jobs:
  generate-build-matrix:
    if: ${{ github.repository_owner == 'pytorch' }}
    runs-on: ubuntu-18.04
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    container:
      image: python:3.9
    steps:
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - name: Generating build matrix
        id: set-matrix
        run: |
          # outputting for debugging purposes
          MATRIX=$(python .github/scripts/generate_binary_build_matrix.py conda)
          echo "${MATRIX}"
          echo "::set-output name=matrix::${MATRIX}"
  build-conda:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: generate-build-matrix
    runs-on: linux.2xlarge
    strategy:
      matrix:
        ${{ fromJson(needs.generate-build-matrix.outputs.matrix) }}
      fail-fast: false
    container:
      image: ${{ matrix.container_image }}
    env:
      DESIRED_PYTHON: ${{ matrix.python_version }}
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: ${{ matrix.gpu_arch_version }}
      GPU_ARCH_VERSION: ${{ matrix.GPU_ARCH_VERSION }}
      GPU_ARCH_TYPE: ${{ matrix.gpu_arch_type }}
      NO_BUILD_SUFFIX: True
      # TODO: This is a legacy variable, we should just default all build to use
      #       this folder within the conda/build_pytorch.sh script
      TORCH_CONDA_BUILD_FOLDER: pytorch-nightly
      # TODO: Another legacy env variable that isn't useful anymore, should default
      #       to pytorch within the scripts directly
      ANACONDA_USER: pytorch
      PYTORCH_FINAL_PACKAGE_DIR: /remote
      # We specify the CONDA_BLD_PATH here since conda creates extremely long paths
      # for its default build path
      CONDA_BLD_PATH: /build
      PYTORCH_BUILD_NUMBER: 1
      SKIP_ALL_TESTS: 1
    steps:
      - name: Clean runner workspace
        run: rm -rf "$GITHUB_WORKSPACE"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: pytorch
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: builder
      - name: Generate version string
        working-directory: pytorch/
        run: |
          version=$(.github/scripts/generate_pytorch_version.py)
          echo "Generated version: ${version}"
          echo "PYTORCH_BUILD_VERSION=${version}" >> "$GITHUB_ENV"
      - name: Set BUILD_SPLIT_CUDA
        if: ${{ matrix.gpu_arch_type == 'cuda' && matrix.gpu_arch_version == '11.1' }}
        run: |
          echo "BUILD_SPLIT_CUDA=1" >> "$GITHUB_ENV"
      # TODO: Remove this once we remove the need for the directories to be
      # in specific locations
      - name: Symlink repositories to root directory (for legacy scripts purposes)
        run: |
          mv "$PWD"/pytorch /pytorch
          mv "$PWD"/builder /builder
      # TODO: Bundle the correct build script in the base container image so
      # that we don't have to do this type of specification
      - name: Build PyTorch binary
        run: |
          /builder/conda/build_pytorch.sh
      - uses: actions/upload-artifact@v2
        with:
          name: pytorch-conda-py${{ matrix.python_version }}-${{matrix.gpu_arch_type}}-${{ matrix.gpu_arch_version }}
          path: /remote/**/*.bz2
      # TODO: Add a step here for uploading binaries
.github/workflows/build_linux_libtorch.yml  (94 lines, vendored)
@@ -1,94 +0,0 @@
name: Build Linux libtorch

on:
  # TODO: These are only runnable from workflow_dispatch, we need to eventually add
  #       a cron
  # TODO: Add an on_release trigger to build on tags
  workflow_dispatch:

jobs:
  generate-build-matrix:
    if: ${{ github.repository_owner == 'pytorch' }}
    runs-on: ubuntu-18.04
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    container:
      image: python:3.9
    steps:
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
      - name: Generating build matrix
        id: set-matrix
        run: |
          # outputting for debugging purposes
          MATRIX=$(python .github/scripts/generate_binary_build_matrix.py libtorch)
          echo "${MATRIX}"
          echo "::set-output name=matrix::${MATRIX}"
  build-libtorch:
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: generate-build-matrix
    runs-on: linux.2xlarge
    strategy:
      matrix:
        ${{ fromJson(needs.generate-build-matrix.outputs.matrix) }}
      fail-fast: false
    container:
      image: ${{ matrix.container_image }}
    env:
      # TODO: remove this var from the libtorch builder script(s)
      DESIRED_PYTHON: '3.7'
      # TODO: This is a legacy variable that we eventually want to get rid of in
      #       favor of GPU_ARCH_VERSION
      DESIRED_CUDA: ${{ matrix.gpu_arch_version }}
      GPU_ARCH_VERSION: ${{ matrix.GPU_ARCH_VERSION }}
      GPU_ARCH_TYPE: ${{ matrix.gpu_arch_type }}
      BUILD_PYTHONLESS: 1
      LIBTORCH_VARIANT: ${{ matrix.libtorch_variant }}
      # TODO: remove this and bake env var into the Docker image
      DESIRED_DEVTOOLSET: ${{ matrix.devtoolset }}
      PYTORCH_BUILD_NUMBER: 1
      SKIP_ALL_TESTS: 1
    steps:
      - name: Clean runner workspace
        run: rm -rf "$GITHUB_WORKSPACE"
      - name: Clone pytorch/pytorch
        uses: actions/checkout@v2
        with:
          path: pytorch
          submodules: recursive
      - name: Clone pytorch/builder
        uses: actions/checkout@v2
        with:
          repository: pytorch/builder
          path: builder
      - name: Generate version string
        working-directory: pytorch/
        run: |
          version=$(.github/scripts/generate_pytorch_version.py)
          echo "Generated version: ${version}"
          echo "PYTORCH_BUILD_VERSION=${version}" >> "$GITHUB_ENV"
      - name: Set BUILD_SPLIT_CUDA
        if: ${{ matrix.gpu_arch_type == 'cuda' && matrix.gpu_arch_version == '11.1' }}
        run: |
          echo "BUILD_SPLIT_CUDA=1" >> "$GITHUB_ENV"
      # TODO: Remove this once we remove the need for the directories to be
      # in specific locations
      - name: Symlink repositories to root directory (for legacy scripts purposes)
        run: |
          ln -s "$PWD"/pytorch /pytorch
          ln -s "$PWD"/builder /builder
      # TODO: Bundle the correct build script in the base container image so
      # that we don't have to do this type of specification
      - name: Build PyTorch binary (CUDA specific)
        if: ${{ matrix.gpu_arch_type == 'cuda' }}
        run: |
          /builder/manywheel/build.sh
      - name: Build PyTorch binary (CPU specific)
        if: ${{ matrix.gpu_arch_type == 'cpu' }}
        run: |
          /builder/manywheel/build_cpu.sh
      - uses: actions/upload-artifact@v2
        with:
          name: pytorch-libtorch-${{ matrix.libtorch_variant }}-${{ matrix.devtoolset }}-${{matrix.gpu_arch_type}}-${{ matrix.gpu_arch_version }}
          path: /remote/**/*.zip
      # TODO: Add a step here for uploading binaries
.github/workflows/cancel_redundant_workflows.yml  (24 lines, vendored)
@@ -1,24 +0,0 @@
name: Cancel redundant workflows
on:
  workflow_run:
    types:
      - requested
    # NOTE: Make sure to add to this list as you add more workflows running on 'pull_request'
    workflows:
      - Lint
      - Linux CI (pytorch-linux-xenial-py3.6-gcc5.4)
      - Test tools
      - TorchBench CI (pytorch-linux-py3.7-cu102)
      - clang-format
jobs:
  cancel:
    # We do not want to cancel reruns on master
    if: github.event.workflow_run.head_branch != 'master'
    runs-on: ubuntu-18.04
    steps:
      - name: Cancel duplicate workflow runs
        uses: potiuk/cancel-workflow-runs@a81b3c4d59c61e27484cfacdc13897dd908419c9
        with:
          cancelMode: duplicates
          token: ${{ secrets.GITHUB_TOKEN }}
          sourceRunId: ${{ github.event.workflow_run.id }}
.github/workflows/clang_format.yml  (27 changed lines, vendored)
@@ -8,37 +8,46 @@ jobs:
     runs-on: ubuntu-18.04
     steps:
       - name: Setup Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v1
         with:
           python-version: 3.x
           architecture: x64
       - name: Fetch PyTorch
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0 # deep clone, to allow us to use git merge-base
+        uses: actions/checkout@v1
+      - name: Checkout PR tip
+        run: |
+          set -eux
+          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+            # We are on a PR, so actions/checkout leaves us on a merge commit.
+            # Check out the actual tip of the branch.
+            git checkout ${{ github.event.pull_request.head.sha }}
+          fi
+          echo ::set-output name=commit_sha::$(git rev-parse HEAD)
+        id: get_pr_tip
       - name: Run clang-format
-        env:
-          BASE_SHA: ${{ github.event.pull_request.base.sha }}
         run: |
           set -eu
           # This is necessary to get the same results regardless of whether the
           # PR was opened directly or from a forked repo. See: `9f890a92` for more info.
           git remote add upstream https://github.com/pytorch/pytorch
           git fetch upstream "$GITHUB_BASE_REF"
+          BASE_SHA=${{ github.event.pull_request.base.sha }}
+          HEAD_SHA=${{ github.event.pull_request.head.sha }}
+          MERGE_BASE=$(git merge-base $BASE_SHA $HEAD_SHA)

           # only run clang-format on allowlisted files
           echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
           echo "| clang-format failures found! Run: "
-          echo "| tools/clang_format_ci.sh ${BASE_SHA} "
+          echo "| tools/clang_format_ci.sh ${MERGE_BASE} "
           echo "| to fix this error. "
           echo "| For more info, see: https://github.com/pytorch/pytorch/wiki/clang-format "
           echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"

-          tools/clang_format_ci.sh "${BASE_SHA}"
+          tools/clang_format_ci.sh ${MERGE_BASE}

           GIT_DIFF=$(git diff)
           if [[ -z $GIT_DIFF ]]; then
             exit 0
           fi
-          echo "$GIT_DIFF"
+          echo $GIT_DIFF
           exit 1
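
The substantive change in this hunk is which commit the formatter diffs against: `git merge-base` finds the commit the PR actually branched from, so on a stale branch only lines the PR touched get checked, whereas diffing against the raw base SHA can also flag unrelated upstream changes. A sketch of the idea, reusing the variables from the hunk above:

    git fetch upstream "$GITHUB_BASE_REF"
    MERGE_BASE=$(git merge-base "$BASE_SHA" "$HEAD_SHA")   # fork point of the PR branch
    tools/clang_format_ci.sh "$MERGE_BASE"                 # format-check only the PR's own changes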
.github/workflows/lint.yml  (317 changed lines, vendored)
@@ -11,234 +11,116 @@ jobs:
     runs-on: ubuntu-18.04
     steps:
       - name: Setup Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v1
         with:
           python-version: 3.x
           architecture: x64
       - name: Checkout PyTorch
-        uses: actions/checkout@v2
-      - name: Install requirements
-        id: requirements
-        run: pip install -r requirements.txt
+        uses: actions/checkout@v1
+      - name: Checkout PR tip
+        run: |
+          set -eux
+          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+            # We are on a PR, so actions/checkout leaves us on a merge commit.
+            # Check out the actual tip of the branch.
+            git checkout ${{ github.event.pull_request.head.sha }}
+          fi
+          echo ::set-output name=commit_sha::$(git rev-parse HEAD)
+        id: get_pr_tip
       - name: Ensure consistent CircleCI YAML config
-        if: always() && steps.requirements.outcome == 'success'
-        run: cd .circleci && ./ensure-consistency.py
-      - name: Ensure consistent GHA workflows in cancel_redundant_workflows.yml
-        if: always() && steps.requirements.outcome == 'success'
         run: |
-          pip install ruamel.yaml==0.17.4
-          echo "Please locally run .github/scripts/regenerate_cancel_redundant_workflow.py and commit if this step fails."
-          .github/scripts/regenerate_cancel_redundant_workflow.py
-          git diff --exit-code .github/workflows/cancel_redundant_workflows.yml
-      - name: Lint native_functions.yaml
-        if: always() && steps.requirements.outcome == 'success'
+          pip install -r requirements.txt
+          cd .circleci && ./ensure-consistency.py
+      - name: Shellcheck Jenkins scripts
+        # https://github.com/koalaman/shellcheck#installing-a-pre-compiled-binary
         run: |
-          pip install ruamel.yaml==0.17.4
-          .github/scripts/lint_native_functions.py
-      - name: Extract scripts from GitHub Actions workflows
-        if: always() && steps.requirements.outcome == 'success'
-        run: |
-          # For local lints, remove the .extracted_scripts folder if it was already there
-          rm -rf .extracted_scripts
-          tools/extract_scripts.py --out=.extracted_scripts
-      - name: Install ShellCheck
-        id: install_shellcheck
-        if: always()
-        # https://github.com/koalaman/shellcheck/tree/v0.7.2#installing-a-pre-compiled-binary
-        run: |
-          set -x
-          scversion="v0.7.2"
+          scversion="stable"
           wget -qO- "https://github.com/koalaman/shellcheck/releases/download/${scversion?}/shellcheck-${scversion?}.linux.x86_64.tar.xz" | tar -xJv
           sudo cp "shellcheck-${scversion}/shellcheck" /usr/bin/
           rm -r "shellcheck-${scversion}"
           shellcheck --version
-      - name: Run ShellCheck
-        if: always() && steps.install_shellcheck.outcome == 'success'
-        run: |
-          tools/run_shellcheck.sh .jenkins/pytorch .extracted_scripts
-      - name: Ensure correct trailing newlines
-        if: always() && steps.requirements.outcome == 'success'
-        run: |
-          (! git --no-pager grep -Il '' -- . ':(exclude)**/contrib/**' ':(exclude)third_party' ':(exclude)**.expect' ':(exclude)tools/clang_format_hash' | tools/trailing_newlines.py || (echo "The above files do not have correct trailing newlines; please normalize them"; false))
-      - name: Ensure no trailing spaces
-        if: always()
-        run: |
-          (! git --no-pager grep -In '[[:blank:]]$' -- . ':(exclude)**/contrib/**' ':(exclude)third_party' || (echo "The above lines have trailing spaces; please remove them"; false))
+          .jenkins/run-shellcheck.sh
       - name: Ensure no tabs
-        if: always()
         run: |
-          (! git --no-pager grep -In $'\t' -- . ':(exclude)*.svg' ':(exclude)**Makefile' ':(exclude)**/contrib/**' ':(exclude)third_party' ':(exclude).gitattributes' ':(exclude).gitmodules' || (echo "The above lines have tabs; please convert them to spaces"; false))
-      - name: Ensure no non-breaking spaces
-        if: always()
-        run: |
-          # NB: We use 'printf' below rather than '\u000a' since bash pre-4.2
-          # does not support the '\u000a' syntax (which is relevant for local linters)
-          (! git --no-pager grep -In "$(printf '\xC2\xA0')" -- . || (echo "The above lines have non-breaking spaces (U+00A0); please convert them to spaces (U+0020)"; false))
+          (! git grep -I -l $'\t' -- . ':(exclude)*.svg' ':(exclude)**Makefile' ':(exclude)**/contrib/**' ':(exclude)third_party' ':(exclude).gitattributes' ':(exclude).gitmodules' || (echo "The above files have tabs; please convert them to spaces"; false))
       - name: Ensure canonical include
-        if: always()
         run: |
-          (! git --no-pager grep -In $'#include "' -- ./c10 ./aten ./torch/csrc ':(exclude)aten/src/ATen/native/quantized/cpu/qnnpack/**' || (echo "The above lines have include with quotes; please convert them to #include <xxxx>"; false))
-      - name: Ensure no versionless Python shebangs
-        if: always()
-        run: |
-          (! git --no-pager grep -In '#!.*python$' -- . || (echo "The above lines have versionless Python shebangs; please specify either python2 or python3"; false))
-      - name: Ensure no unqualified noqa
-        if: always()
-        run: |
-          # shellcheck disable=SC2016
-          (! git --no-pager grep -InP '# noqa(?!: [A-Z]+\d{3})' -- '**.py' '**.pyi' ':(exclude)caffe2' || (echo 'The above lines have unqualified `noqa`; please convert them to `noqa: XXXX`'; false))
-      - name: Ensure no unqualified type ignore
-        if: always()
-        run: |
-          # shellcheck disable=SC2016
-          (! git --no-pager grep -InP '# type:\s*ignore(?!\[)' -- '**.py' '**.pyi' ':(exclude)test/test_jit.py' || (echo 'The above lines have unqualified `type: ignore`; please convert them to `type: ignore[xxxx]`'; false))
-      # note that this next step depends on a clean checkout;
+          (! git grep -I -l $'#include "' -- ./c10 ./aten ./torch/csrc ':(exclude)aten/src/ATen/native/quantized/cpu/qnnpack/**' || (echo "The above files have include with quotes; please convert them to #include <xxxx>"; false))
+      # note that this next step depends on a clean heckout;
       # if you run it locally then it will likely to complain
       # about all the generated files in torch/test
       - name: Ensure C++ source files are not executable
-        if: always()
         run: |
-          # shellcheck disable=SC2016
           (! find . \( -path ./third_party -o -path ./.git -o -path ./torch/bin -o -path ./build \) -prune -o -type f -executable -regextype posix-egrep -not -regex '.+(\.(bash|sh|py|so)|git-pre-commit|git-clang-format|gradlew)$' -print | grep . || (echo 'The above files have executable permission; please remove their executable permission by using `chmod -x`'; false))
       - name: C++ docs check
-        if: always() && steps.requirements.outcome == 'success'
         run: |
-          sudo apt-get install -y doxygen
+          sudo apt-get install -y doxygen && pip install -r requirements.txt
           cd docs/cpp/source && ./check-doxygen.sh
       - name: CUDA kernel launch check
-        if: always() && steps.requirements.outcome == 'success'
         run: |
           set -eux
-          python torch/testing/check_kernel_launches.py |& tee "${GITHUB_WORKSPACE}"/cuda_kernel_launch_checks.txt
-      - name: Ensure no direct cub include
-        if: always()
-        run: |
-          (! git --no-pager grep -I -no $'#include <cub/' -- ./aten ':(exclude)aten/src/ATen/cuda/cub.cuh' || (echo "The above files have direct cub include; please include ATen/cuda/cub.cuh instead and wrap your cub calls in at::native namespace if necessary"; false))
-
-  py2-setup-validate-errormsg:
-    runs-on: ubuntu-18.04
-    steps:
-      - name: Setup Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: 2.x
-          architecture: x64
-      - name: Checkout PyTorch
-        uses: actions/checkout@v2
-      - name: Attempt to run setup.py
-        run: |
-          python2 setup.py | grep "Python 2 has reached end-of-life and is no longer supported by PyTorch."
-
-  templates:
-    runs-on: ubuntu-18.04
-    steps:
-      - name: Setup Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.x
-          architecture: x64
-      - name: Install Jinja2
-        run: pip install Jinja2
-      - name: Checkout PyTorch
-        uses: actions/checkout@v2
-      - name: Regenerate workflows
-        run: .github/scripts/generate_linux_ci_workflows.py
-      - name: Assert that regenerating the workflows didn't change them
-        run: .github/scripts/report_git_status.sh
-
-  toc:
-    runs-on: ubuntu-18.04
-    # https://github.com/actions/virtual-environments/issues/599#issuecomment-602754687
-    env:
-      NPM_CONFIG_PREFIX: ~/.npm-global
-    steps:
-      - name: Setup Node
-        uses: actions/setup-node@v2
-      - name: Checkout PyTorch
-        uses: actions/checkout@v2
-      - name: Install markdown-toc
-        run: npm install -g markdown-toc
-      - name: Regenerate ToCs and check that they didn't change
-        run: |
-          set -eux
-          export PATH=~/.npm-global/bin:"$PATH"
-          for FILE in $(git grep -Il '<!-- toc -->' -- '**.md'); do
-            markdown-toc --bullets='-' -i "$FILE"
-          done
-
-          .github/scripts/report_git_status.sh
+          python torch/testing/check_kernel_launches.py |& tee ${GITHUB_WORKSPACE}/cuda_kernel_launch_checks.txt

   flake8-py3:
     runs-on: ubuntu-18.04
     steps:
       - name: Setup Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v1
         with:
           python-version: 3.x
           architecture: x64
       - name: Fetch PyTorch
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 2 # to allow us to use github.event.pull_request.head.sha
-      - name: Prepare output dir with HEAD commit SHA
-        env:
-          HEAD_SHA: ${{ github.event.pull_request.head.sha }}
-        run: |
-          mkdir flake8-output
-          cd flake8-output
-          echo "$HEAD_SHA" > commit-sha.txt
-      - name: Install dependencies
+        uses: actions/checkout@v1
+      - name: Checkout PR tip
         run: |
           set -eux
-          pip install typing-extensions # for tools/translate_annotations.py
-          pip install -r requirements-flake8.txt
-          flake8 --version
+          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+            # We are on a PR, so actions/checkout leaves us on a merge commit.
+            # Check out the actual tip of the branch.
+            git checkout ${{ github.event.pull_request.head.sha }}
+          fi
+          echo ::set-output name=commit_sha::$(git rev-parse HEAD)
+        id: get_pr_tip
       - name: Run flake8
         run: |
           set -eux
-          flake8 | tee "${GITHUB_WORKSPACE}"/flake8-output.txt
-      - name: Translate annotations
-        if: github.event_name == 'pull_request'
-        env:
-          HEAD_SHA: ${{ github.event.pull_request.head.sha }}
-        run: |
-          tools/translate_annotations.py \
-            --file="${GITHUB_WORKSPACE}"/flake8-output.txt \
-            --regex='^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorCode>\w+\d+) (?P<errorDesc>.*)' \
-            --commit="$HEAD_SHA" \
-            > flake8-output/annotations.json
-      - name: Upload artifact
-        uses: actions/upload-artifact@v2
+          pip install -r requirements-flake8.txt
+          flake8 --version
+          flake8 | tee ${GITHUB_WORKSPACE}/flake8-output.txt
+      - name: Add annotations
+        uses: pytorch/add-annotations-github-action@master
         with:
-          name: flake8-py3
-          path: flake8-output/
-      - name: Fail if there were any warnings
+          check_name: 'flake8-py3'
+          linter_output_path: 'flake8-output.txt'
+          commit_sha: ${{ steps.get_pr_tip.outputs.commit_sha }}
+          regex: '^(?<filename>.*?):(?<lineNumber>\d+):(?<columnNumber>\d+): (?<errorCode>\w+\d+) (?<errorDesc>.*)'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Catch any other warnings
         run: |
-          set -eux
-          # Re-output flake8 status so GitHub logs show it on the step that actually failed
-          cat "${GITHUB_WORKSPACE}"/flake8-output.txt
-          [ ! -s "${GITHUB_WORKSPACE}"/flake8-output.txt ]
+          [ ! -s flake8-output.txt ]

   clang-tidy:
     if: github.event_name == 'pull_request'
     runs-on: ubuntu-18.04
     steps:
       - name: Setup Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v1
         with:
           python-version: 3.x
           architecture: x64
       - name: Checkout PyTorch
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0 # to allow tools/clang_tidy.py to do its thing
-      - name: Prepare output dir with HEAD commit SHA
-        env:
-          HEAD_SHA: ${{ github.event.pull_request.head.sha }}
+        uses: actions/checkout@v1
+      - name: Checkout PR tip
         run: |
-          mkdir clang-tidy-output
-          cd clang-tidy-output
-          echo "$HEAD_SHA" > commit-sha.txt
+          set -eux
+          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+            # We are on a PR, so actions/checkout leaves us on a merge commit.
+            # Check out the actual tip of the branch.
+            git checkout ${{ github.event.pull_request.head.sha }}
+          fi
+          echo ::set-output name=commit_sha::$(git rev-parse HEAD)
+        id: get_pr_tip
       - name: Install dependencies
         run: |
           set -eux
@@ -256,17 +138,19 @@ jobs:
           sudo apt-get update
           sudo apt-get install -y clang-tidy-11
           sudo update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-11 1000
-      - name: Generate build files
+      - name: Run clang-tidy
         run: |
           set -eux
           git remote add upstream https://github.com/pytorch/pytorch
           git fetch upstream "$GITHUB_BASE_REF"
+          BASE_SHA=${{ github.event.pull_request.base.sha }}
+          HEAD_SHA=${{ github.event.pull_request.head.sha }}
+          MERGE_BASE=$(git merge-base $BASE_SHA $HEAD_SHA)

           if [[ ! -d build ]]; then
             git submodule update --init --recursive

             export USE_NCCL=0
-            export USE_DEPLOY=1
             # We really only need compile_commands.json, so no need to build!
             time python setup.py --cmake-only build

@@ -281,12 +165,6 @@ jobs:
             --native-functions-path aten/src/ATen/native/native_functions.yaml \
             --nn-path aten/src
           fi
-      - name: Run clang-tidy
-        env:
-          BASE_SHA: ${{ github.event.pull_request.base.sha }}
-          HEAD_SHA: ${{ github.event.pull_request.head.sha }}
-        run: |
-          set -eux

           # Run Clang-Tidy
           # The negative filters below are to exclude files that include onnx_pb.h or
@@ -299,7 +177,7 @@ jobs:
           python tools/clang_tidy.py \
             --verbose \
             --paths torch/csrc/ \
-            --diff "$BASE_SHA" \
+            --diff "$MERGE_BASE" \
             -g"-torch/csrc/jit/passes/onnx/helper.cpp" \
             -g"-torch/csrc/jit/passes/onnx/shape_type_inference.cpp"\
             -g"-torch/csrc/jit/serialization/onnx.cpp" \
@@ -316,67 +194,44 @@ jobs:
             -g"-torch/csrc/deploy/interpreter/interpreter.h" \
             -g"-torch/csrc/deploy/interpreter/interpreter_impl.h" \
             -g"-torch/csrc/deploy/interpreter/test_main.cpp" \
-            "$@" > "${GITHUB_WORKSPACE}"/clang-tidy-output.txt
+            "$@" > ${GITHUB_WORKSPACE}/clang-tidy-output.txt

-          cat "${GITHUB_WORKSPACE}"/clang-tidy-output.txt
-          tools/translate_annotations.py \
-            --file=clang-tidy-output.txt \
-            --regex='^(?P<filename>.*?):(?P<lineNumber>\d+):(?P<columnNumber>\d+): (?P<errorDesc>.*?) \[(?P<errorCode>.*)\]' \
-            --commit="$HEAD_SHA" \
-            > clang-tidy-output/annotations.json
-      - name: Upload artifact
-        uses: actions/upload-artifact@v2
+          cat ${GITHUB_WORKSPACE}/clang-tidy-output.txt
+      - name: Add annotations
+        uses: suo/add-annotations-github-action@master
         with:
-          name: clang-tidy
-          path: clang-tidy-output/
+          check_name: 'clang-tidy'
+          linter_output_path: 'clang-tidy-output.txt'
+          commit_sha: ${{ steps.get_pr_tip.outputs.commit_sha }}
+          regex: '^(?<filename>.*?):(?<lineNumber>\d+):(?<columnNumber>\d+): (?<errorDesc>.*?) \[(?<errorCode>.*)\]'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

   cmakelint:
     runs-on: ubuntu-18.04
     steps:
       - name: Setup Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v1
         with:
           python-version: 3.x
           architecture: x64
       - name: Fetch PyTorch
-        uses: actions/checkout@v2
-      - name: Install dependencies
+        uses: actions/checkout@v1
+      - name: Checkout PR tip
+        run: |
+          set -eux
+          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
+            # We are on a PR, so actions/checkout leaves us on a merge commit.
+            # Check out the actual tip of the branch.
+            git checkout ${{ github.event.pull_request.head.sha }}
+          fi
+          echo ::set-output name=commit_sha::$(git rev-parse HEAD)
+        id: get_pr_tip
+      - name: Run cmakelint
         run: |
           set -eux
           pip install cmakelint
           cmakelint --version
-      - name: Run cmakelint
-        run: |
-          set -eux
           git ls-files -z -- bootstrap '*.cmake' '*.cmake.in' '*CMakeLists.txt' | \
-          grep -E -z -v '^(cmake/Modules/|cmake/Modules_CUDA_fix/|cmake/Caffe2Config.cmake.in|aten/src/ATen/ATenConfig.cmake.in|cmake/Caffe2ConfigVersion.cmake.in|cmake/TorchConfig.cmake.in|cmake/TorchConfigVersion.cmake.in|cmake/cmake_uninstall.cmake.in)' | \
+          grep -E -z -v '^(cmake/Modules/|cmake/Modules_CUDA_fix/)' | \
           xargs -0 cmakelint --config=.cmakelintrc --spaces=2 --quiet
-
-  mypy:
-    runs-on: ubuntu-18.04
-    steps:
-      - name: Setup Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.8
-          architecture: x64
-      - name: Fetch PyTorch
-        uses: actions/checkout@v2
-      - name: Install dependencies
-        run: |
-          set -eux
-          pip install -r requirements.txt
-          pip install mypy==0.812
-          # Needed to check tools/render_junit.py
-          pip install junitparser rich
-      - name: Run autogen
-        run: |
-          set -eux
-          time python -mtools.generate_torch_version --is_debug=false
-          time python -mtools.codegen.gen -s aten/src/ATen -d build/aten/src/ATen
-          time python -mtools.pyi.gen_pyi --native-functions-path aten/src/ATen/native/native_functions.yaml --deprecated-functions-path "tools/autograd/deprecated.yaml"
-      - name: Run mypy
-        run: |
-          set -eux
-          for CONFIG in mypy*.ini; do mypy --config="$CONFIG"; done
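
Most of the lint steps in this hunk share one shell idiom worth noting: run a grep, invert it with `!`, and fail with a message only when a match is found, so the offending lines are printed directly above the failure. A minimal standalone sketch of the pattern, narrowed to Python files for illustration:

    (! git --no-pager grep -In $'\t' -- '*.py' \
      || (echo "The above lines have tabs; please convert them to spaces"; false))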
.github/workflows/push_nightly_docker_ghcr.yml  (22 lines, vendored)
@@ -1,22 +0,0 @@
name: Build PyTorch nightly Docker image and push to GitHub Container Registry
on:
  schedule:
    # Push the nightly docker daily at 1 PM UTC
    - cron: '0 13 * * *'
  # Have the ability to trigger this job manually using the API as well
  workflow_dispatch:

jobs:
  build-publish-docker:
    if: ${{ github.repository_owner == 'pytorch' }}
    runs-on: linux.2xlarge
    env:
      GHCR_PAT: ${{ secrets.GHCR_PAT }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          ref: master
      - name: Build and upload nightly docker
        run: |
          bash .github/scripts/build_publish_nightly_docker.sh
@@ -1,288 +0,0 @@
# @generated DO NOT EDIT MANUALLY
|
|
||||||
# Template is at: .github/templates/linux_ci_workflow.yml
|
|
||||||
# Generation script: .github/scripts/generate_linux_ci_workflows.py
|
|
||||||
name: Linux CI (pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7)
|
|
||||||
|
|
||||||
on:
|
|
||||||
# TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
- release/*
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
env:
|
|
||||||
BUILD_ENVIRONMENT: pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7
|
|
||||||
DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7
|
|
||||||
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
|
|
||||||
TORCH_CUDA_ARCH_LIST: 5.2
|
|
||||||
IN_CI: 1
|
|
||||||
# Used for custom_opertor, jit_hooks, custom_backend, see .jenkins/pytorch/build.sh
|
|
||||||
CUSTOM_TEST_ARTIFACT_BUILD_DIR: build/custom_test_artifacts
|
|
||||||
ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
calculate-docker-image:
|
|
||||||
runs-on: linux.2xlarge
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: 1
|
|
||||||
timeout-minutes: 90
|
|
||||||
outputs:
|
|
||||||
docker_image: ${{ steps.calculate-tag.outputs.docker_image }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout PyTorch
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
# deep clone, to allow use of git merge-base
|
|
||||||
fetch-depth: 0
|
|
||||||
- name: Calculate docker image tag
|
|
||||||
id: calculate-tag
|
|
||||||
run: |
|
|
||||||
DOCKER_TAG=$(git rev-parse HEAD:.circleci/docker)
|
|
||||||
echo "::set-output name=docker_tag::${DOCKER_TAG}"
|
|
||||||
echo "::set-output name=docker_image::${DOCKER_IMAGE_BASE}:${DOCKER_TAG}"
|
|
||||||
- name: Check if image should be built
|
|
||||||
id: check
|
|
||||||
env:
|
|
||||||
DOCKER_TAG: ${{ steps.calculate-tag.outputs.docker_tag }}
|
|
||||||
BASE_REVISION: ${{ github.event.pull_request.base.sha || github.sha }}
|
|
||||||
run: |
|
|
||||||
eval "$(aws ecr get-login --no-include-email --region us-east-1)"
|
|
||||||
set -x
|
|
||||||
# Check if image already exists, if it does then skip building it
|
|
||||||
if docker manifest inspect "${DOCKER_IMAGE_BASE}:${DOCKER_TAG}"; then
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
if [[ "$BASE_REVISION" = "$(git rev-parse HEAD)" ]]; then
|
|
||||||
# if we're on the base branch then use the parent commit
|
|
||||||
MERGE_BASE=$(git rev-parse HEAD~)
|
|
||||||
else
|
|
||||||
# otherwise we're on a PR, so use the most recent base commit
|
|
||||||
MERGE_BASE=$(git merge-base HEAD "$BASE_REVISION")
|
|
||||||
fi
|
|
||||||
# Covers the case where a previous tag doesn't exist for the tree
|
|
||||||
# this is only really applicable on trees that don't have `.circleci/docker` at its merge base, i.e. nightly
|
|
||||||
if ! git rev-parse "$MERGE_BASE:.circleci/docker"; then
|
|
||||||
echo "Directory '.circleci/docker' not found in commit $MERGE_BASE, you should probably rebase onto a more recent commit"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
PREVIOUS_DOCKER_TAG=$(git rev-parse "$MERGE_BASE:.circleci/docker")
|
|
||||||
# If no image exists but the hash is the same as the previous hash then we should error out here
|
|
||||||
if [[ "${PREVIOUS_DOCKER_TAG}" = "${DOCKER_TAG}" ]]; then
|
|
||||||
echo "ERROR: Something has gone wrong and the previous image isn't available for the merge-base of your branch"
|
|
||||||
echo " contact the PyTorch team to restore the original images"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
echo ::set-output name=rebuild::yes
|
|
||||||
- name: Build and push docker image
|
|
||||||
if: steps.check.outputs.rebuild
|
|
||||||
env:
|
|
||||||
DOCKER_TAG: ${{ steps.calculate-tag.outputs.docker_tag }}
|
|
||||||
DOCKER_SKIP_S3_UPLOAD: 1
|
|
||||||
run: |
|
|
||||||
export IMAGE_NAME=${DOCKER_IMAGE_BASE#308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/}
|
|
||||||
cd .circleci/docker && ./build_docker.sh
|
|
||||||
  build:
    runs-on: linux.2xlarge
    needs: calculate-docker-image
    env:
      DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
    steps:
      - name: Log in to ECR
        run: |
          aws ecr get-login --no-include-email --region us-east-1 > /tmp/ecr-login.sh
          bash /tmp/ecr-login.sh
          rm /tmp/ecr-login.sh
      - name: Chown workspace
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
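      # Earlier container runs can leave root-owned (or jenkins-owned) files in
      # the checkout on the self-hosted runner; chowning through a throwaway
      # Alpine container restores ownership so the checkout step below can
      # clean the tree.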
      - name: Checkout PyTorch
        uses: actions/checkout@v2
        with:
          fetch-depth: 0 # deep clone, to allow sharding to use git rev-list
          submodules: recursive
      - name: Pull docker image
        run: |
          docker pull "${DOCKER_IMAGE}"
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
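      # The generated file is a plain KEY=value list, one variable per line,
      # which is exactly the format `docker run --env-file` consumes, e.g.
      # (values illustrative):
      #   GITHUB_RUN_ID=123456789
      #   GITHUB_WORKSPACE=/home/runner/work/pytorch/pytorch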
      - name: Build PyTorch
        run: |
          docker run \
            -e BUILD_ENVIRONMENT \
            -e MAX_JOBS="$(nproc --ignore=2)" \
            -e SCCACHE_BUCKET \
            -e CUSTOM_TEST_ARTIFACT_BUILD_DIR \
            -e SKIP_SCCACHE_INITIALIZATION=1 \
            -e TORCH_CUDA_ARCH_LIST \
            --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
            --security-opt seccomp=unconfined \
            --cap-add=SYS_PTRACE \
            --tty \
            --user jenkins \
            -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
            -w /var/lib/jenkins/workspace \
            "${DOCKER_IMAGE}" \
            sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh'
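      # Flag notes: `-e NAME` without a value forwards that variable from the
      # runner's environment; `nproc --ignore=2` reserves two cores for the
      # runner itself; `--cap-add=SYS_PTRACE` plus `seccomp=unconfined` is
      # presumably there so debuggers and sanitizers can attach inside the
      # container.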
      - name: Chown workspace
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Archive artifacts into zip
        run: |
          zip -r artifacts.zip dist/ build/
      - uses: actions/upload-artifact@v2
        name: Store PyTorch Build Artifacts
        with:
          name: ${{ env.BUILD_ENVIRONMENT }}
          retention-days: 30
          if-no-files-found: error
          path:
            artifacts.zip
      - name: Clean up docker images
        if: always()
        run: |
          # Prune all of the docker images
          docker system prune -af
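      # The zip is the hand-off point between jobs: `build` uploads it under
      # the BUILD_ENVIRONMENT name, and `test` below downloads and unzips the
      # same artifact, so the two jobs can run on different machines.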
  test:
    runs-on: linux.8xlarge.nvidia.gpu
    needs:
      - calculate-docker-image
      - build
    env:
      DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
    steps:
      - name: Log in to ECR
        run: |
          aws ecr get-login --no-include-email --region us-east-1 > /tmp/ecr-login.sh
          bash /tmp/ecr-login.sh
          rm /tmp/ecr-login.sh
      - name: Chown workspace
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)/../":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Checkout PyTorch
        uses: actions/checkout@v2
      - name: Pull docker image
        run: |
          docker pull "${DOCKER_IMAGE}"
      - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
        if: ${{ contains(env.BUILD_ENVIRONMENT, 'cuda') }}
        run: |
          bash .github/scripts/install_nvidia_utils_linux.sh
          echo "GPU_FLAG=--gpus all" >> "${GITHUB_ENV}"
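      # Appending KEY=value to the file named by $GITHUB_ENV exports the
      # variable to every subsequent step in this job, which is how GPU_FLAG
      # (and SHM_SIZE below) reach the `docker run` invocation in the
      # "Test PyTorch" step.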
      - name: Determine shm-size
        run: |
          shm_size="1g"
          case "${BUILD_ENVIRONMENT}" in
            *cuda*)
              shm_size="2g"
              ;;
            *rocm*)
              shm_size="8g"
              ;;
          esac
          echo "SHM_SIZE=${shm_size}" >> "${GITHUB_ENV}"
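      # Larger /dev/shm for GPU builds, presumably because multi-worker
      # DataLoader tests pass tensors through shared memory and would
      # otherwise fail at the container default shm size.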
      - uses: actions/download-artifact@v2
        name: Download PyTorch Build Artifacts
        with:
          name: ${{ env.BUILD_ENVIRONMENT }}
      - name: Unzip artifacts
        run: |
          unzip -o artifacts.zip
      - name: Output disk space left
        run: |
          sudo df -H
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Test PyTorch
        run: |
          # TODO: Stop building test binaries as part of the build phase
          # Used for GPU_FLAG since that doesn't play nice
          # shellcheck disable=SC2086
          docker run \
            ${GPU_FLAG:-} \
            -e BUILD_ENVIRONMENT \
            -e CUSTOM_TEST_ARTIFACT_BUILD_DIR \
            -e IN_CI \
            -e MAX_JOBS="$(nproc --ignore=2)" \
            --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
            --security-opt seccomp=unconfined \
            --cap-add=SYS_PTRACE \
            --shm-size="${SHM_SIZE}" \
            --tty \
            --user jenkins \
            -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
            -w /var/lib/jenkins/workspace \
            "${DOCKER_IMAGE}" \
            sh -c 'sudo chown -R jenkins . && pip install dist/*.whl && .jenkins/pytorch/test.sh'
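      # ${GPU_FLAG:-} is deliberately left unquoted: quoted, "--gpus all"
      # would be passed to docker as a single argument; unquoted it
      # word-splits into `--gpus all`, and expands to nothing at all on
      # CPU-only runs. That is why shellcheck's SC2086 warning is suppressed
      # above.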
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - uses: actions/upload-artifact@v2
        name: Store PyTorch Test Reports
        if: always()
        with:
          name: test-reports
          retention-days: 30
          if-no-files-found: error
          path:
            test/**/*.xml
      - name: Clean up docker images
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
          # Prune all of the docker images
          docker system prune -af
  render_test_results:
    if: always()
    needs:
      - test
    runs-on: ubuntu-18.04
    steps:
      - name: Checkout PyTorch
        uses: actions/checkout@v2
        with:
          # deep clone, to allow tools/print_test_stats.py to use Git commands
          fetch-depth: 0
      - uses: actions/download-artifact@v2
        name: Download PyTorch Test Reports
        with:
          name: test-reports
          path: test/test-reports
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install dependencies
        # boto3 version copied from .circleci/docker/common/install_conda.sh
        run: |
          pip install -r requirements.txt
          pip install boto3==1.16.34 junitparser rich
      - name: Output Test Results (Click Me)
        run: |
          python tools/render_junit.py test
      - name: Parse ref
        id: parse-ref
        run: .github/scripts/parse_ref.py
      - name: Display and upload test statistics (Click Me)
        # temporary hack: set CIRCLE_* vars, until we update
        # tools/print_test_stats.py to natively support GitHub Actions
        env:
          SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_OSSCI_METRICS_SECRET_ACCESS_KEY }}
          CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
          CIRCLE_JOB: pytorch-linux-xenial-cuda10.2-cudnn7-py3.6-gcc7
          CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
          CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
          CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}
          CIRCLE_WORKFLOW_ID: ${{ github.run_id }} # unclear whether this corresponds exactly
        run: |
          export PYTHONPATH=$PWD
          python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test
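  # The CIRCLE_* names above are a compatibility shim: print_test_stats.py was
  # written against CircleCI's environment, so each GitHub Actions value is
  # mapped onto the CircleCI variable the script already reads.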
@ -1,365 +0,0 @@
# @generated DO NOT EDIT MANUALLY
# Template is at: .github/templates/linux_ci_workflow.yml
# Generation script: .github/scripts/generate_linux_ci_workflows.py
name: Linux CI (pytorch-linux-xenial-py3.6-gcc5.4)

on:
  # TODO: Enable pull_request builds when we can verify capacity can be met by auto-scalers
  pull_request:
  push:
    branches:
      - master
      - release/*
  workflow_dispatch:

env:
  BUILD_ENVIRONMENT: pytorch-linux-xenial-py3.6-gcc5.4
  DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4
  SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
  TORCH_CUDA_ARCH_LIST: 5.2
  IN_CI: 1
  # Used for custom_operator, jit_hooks, custom_backend, see .jenkins/pytorch/build.sh
  CUSTOM_TEST_ARTIFACT_BUILD_DIR: build/custom_test_artifacts
  ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"

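# This file is generated from the same template as the CUDA workflow above;
# apart from the extra doc-build job, the variants differ mainly in
# BUILD_ENVIRONMENT, the base docker image, and the runner used for the
# `test` job (a plain linux.2xlarge here, since no GPU is needed).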
jobs:
  calculate-docker-image:
    runs-on: linux.2xlarge
    env:
      DOCKER_BUILDKIT: 1
    timeout-minutes: 90
    outputs:
      docker_image: ${{ steps.calculate-tag.outputs.docker_image }}
    steps:
      - name: Checkout PyTorch
        uses: actions/checkout@v2
        with:
          # deep clone, to allow use of git merge-base
          fetch-depth: 0
      - name: Calculate docker image tag
        id: calculate-tag
        run: |
          DOCKER_TAG=$(git rev-parse HEAD:.circleci/docker)
          echo "::set-output name=docker_tag::${DOCKER_TAG}"
          echo "::set-output name=docker_image::${DOCKER_IMAGE_BASE}:${DOCKER_TAG}"
      - name: Check if image should be built
        id: check
        env:
          DOCKER_TAG: ${{ steps.calculate-tag.outputs.docker_tag }}
          BASE_REVISION: ${{ github.event.pull_request.base.sha || github.sha }}
        run: |
          eval "$(aws ecr get-login --no-include-email --region us-east-1)"
          set -x
          # Check if the image already exists; if it does then skip building it
          if docker manifest inspect "${DOCKER_IMAGE_BASE}:${DOCKER_TAG}"; then
            exit 0
          fi
          if [[ "$BASE_REVISION" = "$(git rev-parse HEAD)" ]]; then
            # if we're on the base branch then use the parent commit
            MERGE_BASE=$(git rev-parse HEAD~)
          else
            # otherwise we're on a PR, so use the most recent base commit
            MERGE_BASE=$(git merge-base HEAD "$BASE_REVISION")
          fi
          # Covers the case where a previous tag doesn't exist for the tree;
          # this is only really applicable to trees that don't have `.circleci/docker` at their merge base, i.e. nightly
          if ! git rev-parse "$MERGE_BASE:.circleci/docker"; then
            echo "Directory '.circleci/docker' not found in commit $MERGE_BASE, you should probably rebase onto a more recent commit"
            exit 1
          fi
          PREVIOUS_DOCKER_TAG=$(git rev-parse "$MERGE_BASE:.circleci/docker")
          # If no image exists but the hash is the same as the previous hash then we should error out here
          if [[ "${PREVIOUS_DOCKER_TAG}" = "${DOCKER_TAG}" ]]; then
            echo "ERROR: Something has gone wrong and the previous image isn't available for the merge-base of your branch"
            echo "       contact the PyTorch team to restore the original images"
            exit 1
          fi
          echo ::set-output name=rebuild::yes
      - name: Build and push docker image
        if: steps.check.outputs.rebuild
        env:
          DOCKER_TAG: ${{ steps.calculate-tag.outputs.docker_tag }}
          DOCKER_SKIP_S3_UPLOAD: 1
        run: |
          export IMAGE_NAME=${DOCKER_IMAGE_BASE#308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/}
          cd .circleci/docker && ./build_docker.sh
  build:
    runs-on: linux.2xlarge
    needs: calculate-docker-image
    env:
      DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
    steps:
      - name: Log in to ECR
        run: |
          aws ecr get-login --no-include-email --region us-east-1 > /tmp/ecr-login.sh
          bash /tmp/ecr-login.sh
          rm /tmp/ecr-login.sh
      - name: Chown workspace
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Checkout PyTorch
        uses: actions/checkout@v2
        with:
          fetch-depth: 0 # deep clone, to allow sharding to use git rev-list
          submodules: recursive
      - name: Pull docker image
        run: |
          docker pull "${DOCKER_IMAGE}"
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Build PyTorch
        run: |
          docker run \
            -e BUILD_ENVIRONMENT \
            -e MAX_JOBS="$(nproc --ignore=2)" \
            -e SCCACHE_BUCKET \
            -e CUSTOM_TEST_ARTIFACT_BUILD_DIR \
            -e SKIP_SCCACHE_INITIALIZATION=1 \
            -e TORCH_CUDA_ARCH_LIST \
            --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
            --security-opt seccomp=unconfined \
            --cap-add=SYS_PTRACE \
            --tty \
            --user jenkins \
            -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
            -w /var/lib/jenkins/workspace \
            "${DOCKER_IMAGE}" \
            sh -c 'sudo chown -R jenkins . && .jenkins/pytorch/build.sh'
      - name: Chown workspace
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Archive artifacts into zip
        run: |
          zip -r artifacts.zip dist/ build/
      - uses: actions/upload-artifact@v2
        name: Store PyTorch Build Artifacts
        with:
          name: ${{ env.BUILD_ENVIRONMENT }}
          retention-days: 30
          if-no-files-found: error
          path:
            artifacts.zip
      - name: Clean up docker images
        if: always()
        run: |
          # Prune all of the docker images
          docker system prune -af
  test:
    runs-on: linux.2xlarge
    needs:
      - calculate-docker-image
      - build
    env:
      DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
    steps:
      - name: Log in to ECR
        run: |
          aws ecr get-login --no-include-email --region us-east-1 > /tmp/ecr-login.sh
          bash /tmp/ecr-login.sh
          rm /tmp/ecr-login.sh
      - name: Chown workspace
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)/../":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - name: Checkout PyTorch
        uses: actions/checkout@v2
      - name: Pull docker image
        run: |
          docker pull "${DOCKER_IMAGE}"
      - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
        if: ${{ contains(env.BUILD_ENVIRONMENT, 'cuda') }}
        run: |
          bash .github/scripts/install_nvidia_utils_linux.sh
          echo "GPU_FLAG=--gpus all" >> "${GITHUB_ENV}"
      - name: Determine shm-size
        run: |
          shm_size="1g"
          case "${BUILD_ENVIRONMENT}" in
            *cuda*)
              shm_size="2g"
              ;;
            *rocm*)
              shm_size="8g"
              ;;
          esac
          echo "SHM_SIZE=${shm_size}" >> "${GITHUB_ENV}"
      - uses: actions/download-artifact@v2
        name: Download PyTorch Build Artifacts
        with:
          name: ${{ env.BUILD_ENVIRONMENT }}
      - name: Unzip artifacts
        run: |
          unzip -o artifacts.zip
      - name: Output disk space left
        run: |
          sudo df -H
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - name: Test PyTorch
        run: |
          # TODO: Stop building test binaries as part of the build phase
          # Used for GPU_FLAG since that doesn't play nice
          # shellcheck disable=SC2086
          docker run \
            ${GPU_FLAG:-} \
            -e BUILD_ENVIRONMENT \
            -e CUSTOM_TEST_ARTIFACT_BUILD_DIR \
            -e IN_CI \
            -e MAX_JOBS="$(nproc --ignore=2)" \
            --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
            --security-opt seccomp=unconfined \
            --cap-add=SYS_PTRACE \
            --shm-size="${SHM_SIZE}" \
            --tty \
            --user jenkins \
            -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
            -w /var/lib/jenkins/workspace \
            "${DOCKER_IMAGE}" \
            sh -c 'sudo chown -R jenkins . && pip install dist/*.whl && .jenkins/pytorch/test.sh'
      - name: Chown workspace
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
      - uses: actions/upload-artifact@v2
        name: Store PyTorch Test Reports
        if: always()
        with:
          name: test-reports
          retention-days: 30
          if-no-files-found: error
          path:
            test/**/*.xml
      - name: Clean up docker images
        if: always()
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
          # Prune all of the docker images
          docker system prune -af
  render_test_results:
    if: always()
    needs:
      - test
    runs-on: ubuntu-18.04
    steps:
      - name: Checkout PyTorch
        uses: actions/checkout@v2
        with:
          # deep clone, to allow tools/print_test_stats.py to use Git commands
          fetch-depth: 0
      - uses: actions/download-artifact@v2
        name: Download PyTorch Test Reports
        with:
          name: test-reports
          path: test/test-reports
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Install dependencies
        # boto3 version copied from .circleci/docker/common/install_conda.sh
        run: |
          pip install -r requirements.txt
          pip install boto3==1.16.34 junitparser rich
      - name: Output Test Results (Click Me)
        run: |
          python tools/render_junit.py test
      - name: Parse ref
        id: parse-ref
        run: .github/scripts/parse_ref.py
      - name: Display and upload test statistics (Click Me)
        # temporary hack: set CIRCLE_* vars, until we update
        # tools/print_test_stats.py to natively support GitHub Actions
        env:
          SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_OSSCI_METRICS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_OSSCI_METRICS_SECRET_ACCESS_KEY }}
          CIRCLE_BRANCH: ${{ steps.parse-ref.outputs.branch }}
          CIRCLE_JOB: pytorch-linux-xenial-py3.6-gcc5.4
          CIRCLE_PR_NUMBER: ${{ github.event.pull_request.number }}
          CIRCLE_SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
          CIRCLE_TAG: ${{ steps.parse-ref.outputs.tag }}
          CIRCLE_WORKFLOW_ID: ${{ github.run_id }} # unclear whether this corresponds exactly
        run: |
          export PYTHONPATH=$PWD
          python tools/print_test_stats.py --upload-to-s3 --compare-with-s3 test
  pytorch_python_doc_build:
    runs-on: linux.2xlarge
    needs:
      - calculate-docker-image
      - build
    env:
      DOCKER_IMAGE: ${{ needs.calculate-docker-image.outputs.docker_image }}
    steps:
      - name: Log in to ECR
        run: |
          aws ecr get-login --no-include-email --region us-east-1 > /tmp/ecr-login.sh
          bash /tmp/ecr-login.sh
          rm /tmp/ecr-login.sh
      - name: Chown workspace
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v alpine chown -R "$(id -u):$(id -g)" .
      - name: Checkout PyTorch
        uses: actions/checkout@v2
        with:
          fetch-depth: 0 # deep clone, to allow sharding to use git rev-list
          submodules: recursive
      - name: Pull docker image
        run: |
          docker pull "${DOCKER_IMAGE}"
      - name: Preserve github env variables for use in docker
        run: |
          env | grep '^GITHUB' > "/tmp/github_env_${GITHUB_RUN_ID}"
      - uses: actions/download-artifact@v2
        name: Download PyTorch Build Artifacts
        with:
          name: ${{ env.BUILD_ENVIRONMENT }}
      - name: Unzip artifacts
        run: |
          unzip -o artifacts.zip
      - name: Build Python Doc in Docker
        run: |
          set -ex
          time docker pull "${DOCKER_IMAGE}" > /dev/null
          echo "${GITHUB_REF}"
          ref=${GITHUB_REF##*/}
          target=${ref//v}
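          # Illustrative expansions (example values, not taken from a real run):
          #   GITHUB_REF=refs/heads/master  -> ref=master,  target=master
          #   GITHUB_REF=refs/tags/v1.9.0   -> ref=v1.9.0,  target=1.9.0
          # Note ${ref//v} deletes every "v", which is fine for version tags.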
          docker run \
            -e BUILD_ENVIRONMENT \
            -e CUSTOM_TEST_ARTIFACT_BUILD_DIR \
            -e IN_CI \
            -e MAX_JOBS="$(nproc --ignore=2)" \
            -e CIRCLE_SHA1="$GITHUB_SHA" \
            --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
            --security-opt seccomp=unconfined \
            --cap-add=SYS_PTRACE \
            --name="$GITHUB_SHA" \
            --tty \
            --user jenkins \
            -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
            -w /var/lib/jenkins/workspace \
            "${DOCKER_IMAGE}" \
            bash -c "sudo chown -R jenkins . && pip install dist/*.whl && ./.circleci/scripts/python_doc_push_script.sh docs/$target $target site"
      - name: Chown workspace
        run: |
          # Ensure the working directory gets chowned back to the current user
          docker run --rm -v "$(pwd)":/v -w /v alpine chown -R "$(id -u):$(id -g)" .
      - name: Archive artifacts into zip
        run: |
          zip -r pytorch_github_io.zip "${GITHUB_WORKSPACE}/pytorch.github.io"
      - uses: actions/upload-artifact@v2
        name: Store PyTorch Build Artifacts
        with:
          name: pytorch_github_io
          if-no-files-found: error
          path: pytorch_github_io.zip
      - name: Clean up docker images
        if: always()
        run: |
          # Prune all of the docker images
          docker system prune -af
.github/workflows/run_torchbench.yml
@ -1,66 +0,0 @@
name: TorchBench CI (pytorch-linux-py3.7-cu102)
on:
  pull_request:

env:
  PR_NUM: ${{ github.event.number }}
  PR_BODY: ${{ github.event.pull_request.body }}
  PR_BASE_SHA: ${{ github.event.pull_request.base.sha }}
  PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}

jobs:
  run-torchbench:
    # We don't accept running on non-pytorch repos because of security concerns
    # Only run the job when the PR body contains the magic word "RUN_TORCHBENCH:"
    if: ${{ github.repository_owner == 'pytorch' && contains(github.event.pull_request.body, 'RUN_TORCHBENCH:') }}
    runs-on: [self-hosted, bm-runner]
    # Set to 12 hours
    timeout-minutes: 720
    steps:
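      # Benchmarks run only on demand: a PR author opts in by adding a
      # "RUN_TORCHBENCH:" line to the PR description, for example
      # "RUN_TORCHBENCH: all". The accepted values after the colon are parsed
      # by .github/scripts/run_torchbench.py; the example value here is
      # illustrative, not taken from that script.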
      - name: Checkout PyTorch
        uses: actions/checkout@v2
        with:
          path: pytorch
      - name: Checkout TorchBench
        uses: actions/checkout@v2
        with:
          repository: pytorch/benchmark
          path: benchmark
      - name: Create conda environment
        run: |
          conda create -y -n pr-ci python=3.7
          # shellcheck disable=SC1091
          . "${HOME}"/anaconda3/etc/profile.d/conda.sh
          conda activate pr-ci
          conda install -y numpy=1.17 requests=2.22 ninja pyyaml mkl mkl-include setuptools cmake cffi typing_extensions future six dataclasses pillow pytest tabulate
      - name: Update self-hosted PyTorch
        run: |
          pushd "${HOME}"/pytorch
          git fetch
          popd
      - name: Run TorchBench
        run: |
          pushd "${HOME}"/pytorch
          PR_MERGE_BASE=$(git merge-base "$PR_BASE_SHA" "$PR_HEAD_SHA")
          popd
          PR_BODY_FILE=/tmp/pr-body.txt
          echo "$PR_BODY" > ${PR_BODY_FILE}
          # shellcheck disable=SC1091
          . "${HOME}"/anaconda3/etc/profile.d/conda.sh
          conda activate pr-ci
          python3 pytorch/.github/scripts/run_torchbench.py \
            --pytorch-path "${HOME}"/pytorch \
            --torchbench-path "${PWD}"/benchmark \
            --pr-num "$PR_NUM" \
            --pr-base-sha "$PR_MERGE_BASE" \
            --pr-head-sha "$PR_HEAD_SHA" \
            --pr-body "$PR_BODY_FILE"
      - name: Remove conda environment and cleanup
        run: |
          conda env remove --name pr-ci
          rm /tmp/pr-body.txt
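      # Note that --pr-base-sha is the merge-base of the PR, not the tip of
      # the base branch, so the comparison presumably measures only the PR's
      # own effect and is not skewed by commits that landed on the base branch
      # after the PR forked.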
      - name: Upload artifact
        uses: actions/upload-artifact@v2
        with:
          name: TorchBench result
          path: ~/.torchbench/bisection/pr${{ github.event.number }}