add new build files to gitignore; test that build doesn't leave repo dirty

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/16441

Differential Revision: D13880053

Pulled By: ezyang

fbshipit-source-id: 0171f42438efdd651b6af22e521b80e85b12681c
Author: SsnL
Date: 2019-01-30 08:38:49 -08:00
Committed by: Facebook Github Bot
Parent: 7d7855ea31
Commit: 2d2eb7145a

15 changed files with 165 additions and 103 deletions
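
The core of the change is an `assert_git_not_dirty` helper added to `.jenkins/pytorch/common.sh` (see the hunk further down), which the build and test scripts now call after each major step. A minimal standalone sketch of the same check, with an illustrative wrapper around one build step (the helper body mirrors the diff; the surrounding script is made up for demonstration):

#!/bin/bash
# Sketch only: fail the CI step if a build leaves modified or untracked files
# behind in the git checkout (mirrors the helper added in common.sh below).
set -e

assert_git_not_dirty() {
  # `git status --porcelain` prints one line per changed or untracked path and
  # nothing for a clean tree, so any output at all means the checkout is dirty.
  local git_status
  git_status=$(git status --porcelain)
  if [[ -n "${git_status}" ]]; then
    echo "Build left local git repository checkout dirty"
    echo "git status --porcelain:"
    echo "${git_status}"
    exit 1
  fi
}

python setup.py install   # any build or test step
assert_git_not_dirty      # verify the step left the checkout clean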

@@ -80,7 +80,7 @@ install_doc_push_script: &install_doc_push_script
popd
pushd docs
rm -rf source/torchvision
cp -r ../vision/docs/source source/torchvision
cp -a ../vision/docs/source source/torchvision
# Build the docs
pip -q install -r requirements.txt || true
@@ -1242,12 +1242,13 @@ jobs:
export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V3}
export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V3}
git submodule sync && git submodule update -q --init
chmod a+x .jenkins/pytorch/macos-build.sh
unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts
mkdir -p /Users/distiller/pytorch-ci-env/workspace
cp -r /Users/distiller/project/. /Users/distiller/pytorch-ci-env/workspace
# copy with -a to preserve relative structure (e.g., symlinks), and be recursive
cp -a /Users/distiller/project/. /Users/distiller/pytorch-ci-env/workspace
- persist_to_workspace:
root: /Users/distiller/pytorch-ci-env
paths:
@@ -1276,7 +1277,8 @@ jobs:
set -e
export IN_CIRCLECI=1
cp -r /Users/distiller/pytorch-ci-env/workspace/. /Users/distiller/project
# copy with -a to preserve relative structure (e.g., symlinks), and be recursive
cp -a /Users/distiller/pytorch-ci-env/workspace/. /Users/distiller/project
chmod a+x .jenkins/pytorch/macos-test.sh
unbuffer .jenkins/pytorch/macos-test.sh 2>&1 | ts
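
The two `cp -r` → `cp -a` changes above are about preserving the workspace's relative structure, in particular symlinks, when it is copied between the macOS build and test jobs. A throwaway demonstration of the `-a` behaviour (the paths here are made up, not part of the CI config):

# Demo only: `cp -a` (archive mode, -pPR) copies a symlink as a symlink, so
# relative links inside the copied tree keep resolving in the destination.
mkdir -p demo/src/sub
echo data > demo/src/sub/file
ln -s sub/file demo/src/link      # relative symlink inside the tree
cp -a demo/src demo/dst
ls -l demo/dst/link               # still a symlink pointing at sub/file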

.gitignore

@@ -22,7 +22,6 @@
aten/build/
aten/src/ATen/Config.h
aten/src/ATen/cuda/CUDAConfig.h
build/
caffe2/cpp_test/
dist/
docs/src/**/*
@@ -62,16 +61,22 @@ torch/lib/*.dylib*
torch/lib/*.h
torch/lib/*.lib
torch/lib/*.so*
torch/lib/protobuf*.pc
torch/lib/build
torch/lib/caffe2/
torch/lib/cmake
torch/lib/include
torch/lib/pkgconfig
torch/lib/protoc
torch/lib/tmp_install
torch/lib/torch_shm_manager
torch/lib/site-packages/
torch/lib/python*
torch/share/
torch/version.py
# Root level file used in CI to specify certain env configs.
# E.g., see .circleci/config.yaml
env
# IPython notebook checkpoints
.ipynb_checkpoints

@@ -19,3 +19,5 @@ CC="clang" CXX="clang++" LDSHARED="clang --shared" \
CFLAGS="-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -shared-libasan" \
NO_CUDA=1 USE_MKLDNN=0 \
python setup.py install
assert_git_not_dirty

@@ -126,13 +126,11 @@ fi
# only use for "python setup.py install" line
if [[ "$BUILD_ENVIRONMENT" != *ppc64le* ]]; then
WERROR=1 python setup.py install
elif [[ "$BUILD_ENVIRONMENT" == *ppc64le* ]]; then
else
python setup.py install
fi
# Add the test binaries so that they won't be git clean'ed away
git add -f build/bin
assert_git_not_dirty
# Test documentation build
if [[ "$BUILD_ENVIRONMENT" == *xenial-cuda8-cudnn7-py3* ]]; then
@@ -141,6 +139,7 @@ if [[ "$BUILD_ENVIRONMENT" == *xenial-cuda8-cudnn7-py3* ]]; then
pip install -q -r requirements.txt || true
LC_ALL=C make html
popd
assert_git_not_dirty
fi
# Test standalone c10 build
@@ -150,6 +149,7 @@ if [[ "$BUILD_ENVIRONMENT" == *xenial-cuda8-cudnn7-py3* ]]; then
cmake ..
make -j
popd
assert_git_not_dirty
fi
# Test no-Python build
@@ -172,6 +172,7 @@ if [[ "$BUILD_TEST_LIBTORCH" == "1" ]]; then
CMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" cmake "$CUSTOM_OP_TEST"
make VERBOSE=1
popd
assert_git_not_dirty
fi
# Test XLA build
@@ -217,4 +218,5 @@ if [[ "${JOB_BASE_NAME}" == *xla* ]]; then
python setup.py install
popd
assert_git_not_dirty
fi

@@ -63,6 +63,20 @@ declare -f -t trap_add
trap_add cleanup EXIT
function assert_git_not_dirty() {
# TODO: we should add an option to `build_amd.py` that reverts the repo to
# an unmodified state.
if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then
git_status=$(git status --porcelain)
if [[ $git_status ]]; then
echo "Build left local git repository checkout dirty"
echo "git status --porcelain:"
echo "${git_status}"
exit 1
fi
fi
}
if which sccache > /dev/null; then
# Save sccache logs to file
sccache --stop-server || true
@@ -115,7 +129,8 @@ else
fi
if [[ "$BUILD_ENVIRONMENT" == *pytorch-linux-xenial-cuda9-cudnn7-py3 ]] || \
[[ "$BUILD_ENVIRONMENT" == *pytorch-linux-trusty-py3.6-gcc7* ]]; then
[[ "$BUILD_ENVIRONMENT" == *pytorch-linux-trusty-py3.6-gcc7* ]] || \
[[ "$BUILD_ENVIRONMENT" == *pytorch_macos* ]]; then
BUILD_TEST_LIBTORCH=1
else
BUILD_TEST_LIBTORCH=0

@@ -17,6 +17,7 @@ source ${PYTORCH_ENV_DIR}/miniconda3/bin/activate
conda install -y mkl mkl-include numpy pyyaml setuptools cmake cffi ninja
rm -rf ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch*
git submodule sync --recursive
git submodule update --init --recursive
export CMAKE_PREFIX_PATH=${PYTORCH_ENV_DIR}/miniconda3/
@@ -65,6 +66,8 @@ export IMAGE_COMMIT_TAG=${BUILD_ENVIRONMENT}-${IMAGE_COMMIT_ID}
python setup.py install
assert_git_not_dirty
# Upload torch binaries when the build job is finished
if [ -z "${IN_CIRCLECI}" ]; then
7z a ${IMAGE_COMMIT_TAG}.7z ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch*

@@ -16,11 +16,12 @@ fi
export PATH="${PYTORCH_ENV_DIR}/miniconda3/bin:$PATH"
source ${PYTORCH_ENV_DIR}/miniconda3/bin/activate
conda install -y mkl mkl-include numpy pyyaml setuptools cmake cffi ninja six
pip install hypothesis librosa>=0.6.2 psutil
pip install -q hypothesis "librosa>=0.6.2" psutil
if [ -z "${IN_CIRCLECI}" ]; then
rm -rf ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch*
fi
git submodule sync --recursive
git submodule update --init --recursive
export CMAKE_PREFIX_PATH=${PYTORCH_ENV_DIR}/miniconda3/
@@ -52,31 +53,38 @@ fi
test_python_all() {
echo "Ninja version: $(ninja --version)"
python test/run_test.py --verbose
assert_git_not_dirty
}
test_cpp_api() {
test_libtorch() {
# C++ API
# NB: Install outside of source directory (at the same level as the root
# pytorch folder) so that it doesn't get cleaned away prior to docker push.
# But still clean it before we perform our own build.
#
CPP_BUILD="$PWD/../cpp-build"
rm -rf $CPP_BUILD
mkdir -p $CPP_BUILD/caffe2
if [[ "$BUILD_TEST_LIBTORCH" == "1" ]]; then
# NB: Install outside of source directory (at the same level as the root
# pytorch folder) so that it doesn't get cleaned away prior to docker push.
# But still clean it before we perform our own build.
BUILD_LIBTORCH_PY=$PWD/tools/build_libtorch.py
pushd $CPP_BUILD/caffe2
VERBOSE=1 DEBUG=1 python $BUILD_LIBTORCH_PY
popd
echo "Testing libtorch"
python tools/download_mnist.py --quiet -d test/cpp/api/mnist
CPP_BUILD="$PWD/../cpp-build"
rm -rf $CPP_BUILD
mkdir -p $CPP_BUILD/caffe2
# Unfortunately it seems like the test can't load from miniconda3
# without these paths being set
export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$PWD/miniconda3/lib"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$PWD/miniconda3/lib"
"$CPP_BUILD"/caffe2/bin/test_api
BUILD_LIBTORCH_PY=$PWD/tools/build_libtorch.py
pushd $CPP_BUILD/caffe2
VERBOSE=1 DEBUG=1 python $BUILD_LIBTORCH_PY
popd
python tools/download_mnist.py --quiet -d test/cpp/api/mnist
# Unfortunately it seems like the test can't load from miniconda3
# without these paths being set
export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$PWD/miniconda3/lib"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$PWD/miniconda3/lib"
TORCH_CPP_TEST_MNIST_PATH="test/cpp/api/mnist" "$CPP_BUILD"/caffe2/bin/test_api
assert_git_not_dirty
fi
}
test_custom_script_ops() {
@@ -96,18 +104,19 @@ test_custom_script_ops() {
# Run tests C++-side and load the exported script module.
build/test_custom_ops ./model.pt
popd
assert_git_not_dirty
}
if [ -z "${JOB_BASE_NAME}" ] || [[ "${JOB_BASE_NAME}" == *-test ]]; then
test_python_all
test_cpp_api
test_libtorch
test_custom_script_ops
else
if [[ "${JOB_BASE_NAME}" == *-test1 ]]; then
test_python_all
elif [[ "${JOB_BASE_NAME}" == *-test2 ]]; then
test_cpp_api
test_libtorch
test_custom_script_ops
fi
fi

@@ -26,3 +26,4 @@ if [ -n "${IN_CIRCLECI}" ]; then
fi
time python test/run_test.py --verbose -i distributed
assert_git_not_dirty

@@ -12,7 +12,7 @@ test_cpu_speed_mnist () {
cd examples/mnist
pip install -r requirements.txt
pip install -q -r requirements.txt
# Download data
python main.py --epochs 0

@@ -12,7 +12,7 @@ test_gpu_speed_mnist () {
cd examples/mnist
pip install -r requirements.txt
pip install -q -r requirements.txt
# Download data
python main.py --epochs 0

@@ -7,7 +7,7 @@ cd .jenkins/pytorch/perf_test
echo "Running CPU perf test for PyTorch..."
pip install awscli
pip install -q awscli
# Set multipart_threshold to be sufficiently high, so that `aws s3 cp` is not a multipart read
# More info at https://github.com/aws/aws-cli/issues/2321
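
For context on the comment above: `multipart_threshold` is an AWS CLI S3 transfer setting. The actual value used by these perf-test scripts is configured further down in the file (not shown in this hunk); as a hedged illustration of the mechanism, raising the threshold looks like this:

# Assumption/illustration only: push the multipart threshold high enough that
# `aws s3 cp` of the perf-test artifacts is a single-part transfer.
aws configure set default.s3.multipart_threshold 5GB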

@@ -7,7 +7,7 @@ pushd .jenkins/pytorch/perf_test
echo "Running GPU perf test for PyTorch..."
pip install awscli
pip install -q awscli
# Set multipart_threshold to be sufficiently high, so that `aws s3 cp` is not a multipart read
# More info at https://github.com/aws/aws-cli/issues/2321

@@ -82,7 +82,7 @@ if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
export PYTORCH_TEST_WITH_ROCM=1
# ROCm CI is using Caffe2 docker images, which doesn't have several packages
# needed in testing. We install them here.
pip install -q psutil librosa>=0.6.2 --user
pip install -q psutil "librosa>=0.6.2" --user
fi
if [[ "${JOB_BASE_NAME}" == *-NO_AVX-* ]]; then
@@ -93,10 +93,12 @@ fi
test_python_nn() {
time python test/run_test.py --include nn --verbose
assert_git_not_dirty
}
test_python_all_except_nn() {
time python test/run_test.py --exclude nn --verbose
assert_git_not_dirty
}
test_aten() {
@@ -120,6 +122,7 @@ test_aten() {
ls build/bin
aten/tools/run_tests.sh build/bin
assert_git_not_dirty
fi
}
@@ -139,19 +142,21 @@ test_torchvision() {
#time python setup.py install
pip install -q --user .
popd
rm -rf vision
}
test_libtorch() {
if [[ "$BUILD_TEST_LIBTORCH" == "1" ]]; then
echo "Testing libtorch"
CPP_BUILD="$PWD/../cpp-build"
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
"$CPP_BUILD"/caffe2/bin/test_jit
else
"$CPP_BUILD"/caffe2/bin/test_jit "[cpu]"
fi
python tools/download_mnist.py --quiet -d mnist
OMP_NUM_THREADS=2 "$CPP_BUILD"/caffe2/bin/test_api
echo "Testing libtorch"
CPP_BUILD="$PWD/../cpp-build"
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
"$CPP_BUILD"/caffe2/bin/test_jit
else
"$CPP_BUILD"/caffe2/bin/test_jit "[cpu]"
fi
python tools/download_mnist.py --quiet -d test/cpp/api/mnist
OMP_NUM_THREADS=2 TORCH_CPP_TEST_MNIST_PATH="test/cpp/api/mnist" "$CPP_BUILD"/caffe2/bin/test_api
assert_git_not_dirty
fi
}
@@ -160,13 +165,14 @@ test_custom_script_ops() {
echo "Testing custom script operators"
CUSTOM_OP_BUILD="$PWD/../custom-op-build"
pushd test/custom_operator
cp -r "$CUSTOM_OP_BUILD" build
cp -a "$CUSTOM_OP_BUILD" build
# Run tests Python-side and export a script module.
python test_custom_ops.py -v
python model.py --export-script-module=model.pt
# Run tests C++-side and load the exported script module.
build/test_custom_ops ./model.pt
popd
assert_git_not_dirty
fi
}
@@ -177,6 +183,7 @@ test_xla() {
python test/test_operations.py
python test/test_train_mnist.py --tidy
popd
assert_git_not_dirty
}
if [ -z "${JOB_BASE_NAME}" ] || [[ "${JOB_BASE_NAME}" == *-test ]]; then

@@ -17,9 +17,12 @@ if [[ ${JOB_NAME} == *"develop"* ]]; then
export IMAGE_COMMIT_TAG=develop-${IMAGE_COMMIT_TAG}
fi
mkdir -p ci_scripts/
export TMP_DIR="${PWD}/build/win_tmp"
export TMP_DIR_WIN=$(cygpath -w "${TMP_DIR}")
cat >ci_scripts/upload_image.py << EOL
mkdir -p $TMP_DIR/ci_scripts/
cat >$TMP_DIR/ci_scripts/upload_image.py << EOL
import os
import sys
@@ -36,44 +39,44 @@ response = object_acl.put(ACL='public-read')
EOL
cat >ci_scripts/build_pytorch.bat <<EOL
cat >$TMP_DIR/ci_scripts/build_pytorch.bat <<EOL
set PATH=C:\\Program Files\\CMake\\bin;C:\\Program Files\\7-Zip;C:\\ProgramData\\chocolatey\\bin;C:\\Program Files\\Git\\cmd;C:\\Program Files\\Amazon\\AWSCLI;%PATH%
:: Install MKL
if "%REBUILD%"=="" (
if "%BUILD_ENVIRONMENT%"=="" (
curl -k https://s3.amazonaws.com/ossci-windows/mkl_2018.2.185.7z --output mkl.7z
curl -k https://s3.amazonaws.com/ossci-windows/mkl_2018.2.185.7z --output %TMP_DIR_WIN%\\mkl.7z
) else (
aws s3 cp s3://ossci-windows/mkl_2018.2.185.7z mkl.7z --quiet
aws s3 cp s3://ossci-windows/mkl_2018.2.185.7z %TMP_DIR_WIN%\\mkl.7z --quiet
)
7z x -aoa mkl.7z -omkl
7z x -aoa %TMP_DIR_WIN%\\mkl.7z -o%TMP_DIR_WIN%\\mkl
)
set CMAKE_INCLUDE_PATH=%cd%\\mkl\\include
set LIB=%cd%\\mkl\\lib;%LIB
set CMAKE_INCLUDE_PATH=%TMP_DIR_WIN%\\mkl\\include
set LIB=%TMP_DIR_WIN%\\mkl\\lib;%LIB
:: Install MAGMA
if "%REBUILD%"=="" (
if "%BUILD_ENVIRONMENT%"=="" (
curl -k https://s3.amazonaws.com/ossci-windows/magma_2.4.0_cuda90_release.7z --output magma_2.4.0_cuda90_release.7z
curl -k https://s3.amazonaws.com/ossci-windows/magma_2.4.0_cuda90_release.7z --output %TMP_DIR_WIN%\\magma_2.4.0_cuda90_release.7z
) else (
aws s3 cp s3://ossci-windows/magma_2.4.0_cuda90_release.7z magma_2.4.0_cuda90_release.7z --quiet
aws s3 cp s3://ossci-windows/magma_2.4.0_cuda90_release.7z %TMP_DIR_WIN%\\magma_2.4.0_cuda90_release.7z --quiet
)
7z x -aoa magma_2.4.0_cuda90_release.7z -omagma
7z x -aoa %TMP_DIR_WIN%\\magma_2.4.0_cuda90_release.7z -o%TMP_DIR_WIN%\\magma
)
set MAGMA_HOME=%cd%\\magma
set MAGMA_HOME=%TMP_DIR_WIN%\\magma
:: Install sccache
mkdir %CD%\\tmp_bin
mkdir %TMP_DIR_WIN%\\bin
if "%REBUILD%"=="" (
:check_sccache
%CD%\\tmp_bin\\sccache.exe --show-stats || (
%TMP_DIR_WIN%\\bin\\sccache.exe --show-stats || (
taskkill /im sccache.exe /f /t || ver > nul
del %CD%\\tmp_bin\\sccache.exe
del %TMP_DIR_WIN%\\bin\\sccache.exe
if "%BUILD_ENVIRONMENT%"=="" (
curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output %CD%\\tmp_bin\\sccache.exe
curl -k https://s3.amazonaws.com/ossci-windows/sccache.exe --output %TMP_DIR_WIN%\\bin\\sccache.exe
) else (
aws s3 cp s3://ossci-windows/sccache.exe %CD%\\tmp_bin\\sccache.exe
aws s3 cp s3://ossci-windows/sccache.exe %TMP_DIR_WIN%\\bin\\sccache.exe
)
goto :check_sccache
)
@@ -87,8 +90,8 @@ if "%BUILD_ENVIRONMENT%"=="" (
)
if "%REBUILD%"=="" (
IF EXIST %CONDA_PARENT_DIR%\\Miniconda3 ( rd /s /q %CONDA_PARENT_DIR%\\Miniconda3 )
curl -k https://repo.continuum.io/miniconda/Miniconda3-latest-Windows-x86_64.exe -O
.\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_PARENT_DIR%\\Miniconda3
curl -k https://repo.continuum.io/miniconda/Miniconda3-latest-Windows-x86_64.exe --output %TMP_DIR_WIN%\\Miniconda3-latest-Windows-x86_64.exe
%TMP_DIR_WIN%\\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_PARENT_DIR%\\Miniconda3
)
call %CONDA_PARENT_DIR%\\Miniconda3\\Scripts\\activate.bat %CONDA_PARENT_DIR%\\Miniconda3
if "%REBUILD%"=="" (
@@ -97,16 +100,17 @@ if "%REBUILD%"=="" (
)
:: Install ninja
if "%REBUILD%"=="" ( pip install ninja )
if "%REBUILD%"=="" ( pip install -q ninja )
set WORKING_DIR=%CD%
call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" x64
call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" x86_amd64
cd %WORKING_DIR%
git submodule sync --recursive
git submodule update --init --recursive
set PATH=%CD%\\tmp_bin;C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.0\\bin;C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.0\\libnvvp;%PATH%
set PATH=%TMP_DIR_WIN%\\bin;C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.0\\bin;C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.0\\libnvvp;%PATH%
set CUDA_PATH=C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.0
set CUDA_PATH_V9_0=C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.0
set NVTOOLSEXT_PATH=C:\\Program Files\\NVIDIA Corporation\\NvToolsExt
@@ -146,10 +150,10 @@ if not "%USE_CUDA%"=="0" (
del /S /Q %%i
)
)
copy %CD%\\tmp_bin\\sccache.exe tmp_bin\\nvcc.exe
copy %TMP_DIR_WIN%\\bin\\sccache.exe %TMP_DIR_WIN%\\bin\\nvcc.exe
)
set CUDA_NVCC_EXECUTABLE=%CD%\\tmp_bin\\nvcc
set CUDA_NVCC_EXECUTABLE=%TMP_DIR_WIN%\\bin\\nvcc
if "%REBUILD%"=="" set NO_CUDA=0
@@ -158,15 +162,18 @@ if not "%USE_CUDA%"=="0" (
echo NOTE: To run \`import torch\`, please make sure to activate the conda environment by running \`call %CONDA_PARENT_DIR%\\Miniconda3\\Scripts\\activate.bat %CONDA_PARENT_DIR%\\Miniconda3\` in Command Prompt before running Git Bash.
) else (
mv %CD%\\build\\bin\\test_api.exe %CONDA_PARENT_DIR%\\Miniconda3\\Lib\\site-packages\\torch\\lib
7z a %IMAGE_COMMIT_TAG%.7z %CONDA_PARENT_DIR%\\Miniconda3\\Lib\\site-packages\\torch %CONDA_PARENT_DIR%\\Miniconda3\\Lib\\site-packages\\caffe2 && python ci_scripts\\upload_image.py %IMAGE_COMMIT_TAG%.7z
7z a %TMP_DIR_WIN%\\%IMAGE_COMMIT_TAG%.7z %CONDA_PARENT_DIR%\\Miniconda3\\Lib\\site-packages\\torch %CONDA_PARENT_DIR%\\Miniconda3\\Lib\\site-packages\\caffe2 && python %TMP_DIR_WIN%\\ci_scripts\\upload_image.py %TMP_DIR_WIN%\\%IMAGE_COMMIT_TAG%.7z
)
)
)
EOL
ci_scripts/build_pytorch.bat
if [ ! -f $IMAGE_COMMIT_TAG.7z ] && [ ! ${BUILD_ENVIRONMENT} == "" ]; then
$TMP_DIR/ci_scripts/build_pytorch.bat
assert_git_not_dirty
if [ ! -f ${TMP_DIR}/${IMAGE_COMMIT_TAG}.7z ] && [ ! ${BUILD_ENVIRONMENT} == "" ]; then
exit 1
fi
echo "BUILD PASSED"

@@ -8,9 +8,12 @@ if [[ ${JOB_NAME} == *"develop"* ]]; then
export IMAGE_COMMIT_TAG=develop-${IMAGE_COMMIT_TAG}
fi
mkdir -p ci_scripts/
export TMP_DIR="${PWD}/build/win_tmp"
export TMP_DIR_WIN=$(cygpath -w "${TMP_DIR}")
mkdir -p $TMP_DIR/ci_scripts/
mkdir -p $TMP_DIR/build/torch
cat >ci_scripts/download_image.py << EOL
cat >$TMP_DIR/ci_scripts/download_image.py << EOL
import os
import sys
@@ -34,7 +37,7 @@ except botocore.exceptions.ClientError as e:
EOL
cat >ci_scripts/setup_pytorch_env.bat <<EOL
cat >$TMP_DIR/ci_scripts/setup_pytorch_env.bat <<EOL
set PATH=C:\\Program Files\\CMake\\bin;C:\\Program Files\\7-Zip;C:\\ProgramData\\chocolatey\\bin;C:\\Program Files\\Git\\cmd;C:\\Program Files\\Amazon\\AWSCLI;%PATH%
@@ -46,15 +49,15 @@ if "%BUILD_ENVIRONMENT%"=="" (
)
if NOT "%BUILD_ENVIRONMENT%"=="" (
IF EXIST %CONDA_PARENT_DIR%\\Miniconda3 ( rd /s /q %CONDA_PARENT_DIR%\\Miniconda3 )
curl https://repo.continuum.io/miniconda/Miniconda3-latest-Windows-x86_64.exe -O
.\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_PARENT_DIR%\\Miniconda3
curl https://repo.continuum.io/miniconda/Miniconda3-latest-Windows-x86_64.exe --output %TMP_DIR_WIN%\\Miniconda3-latest-Windows-x86_64.exe
%TMP_DIR_WIN%\\Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /RegisterPython=0 /S /AddToPath=0 /D=%CONDA_PARENT_DIR%\\Miniconda3
)
call %CONDA_PARENT_DIR%\\Miniconda3\\Scripts\\activate.bat %CONDA_PARENT_DIR%\\Miniconda3
if NOT "%BUILD_ENVIRONMENT%"=="" (
:: We have to pin Python version to 3.6.7, until mkl supports Python 3.7
call conda install -y -q python=3.6.7 numpy mkl cffi pyyaml boto3 protobuf
)
pip install ninja future hypothesis librosa>=0.6.2 psutil
pip install -q ninja future hypothesis "librosa>=0.6.2" psutil
set WORKING_DIR=%CD%
call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" x86_amd64
@@ -67,21 +70,22 @@ set NVTOOLSEXT_PATH=C:\\Program Files\\NVIDIA Corporation\\NvToolsExt
set CUDNN_LIB_DIR=C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.0\\lib\\x64
set CUDA_TOOLKIT_ROOT_DIR=C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.0
set CUDNN_ROOT_DIR=C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.0
set PYTHONPATH=%CD%\\test;%PYTHONPATH%
set PYTHONPATH=%TMP_DIR_WIN%\\build;%PYTHONPATH%
if NOT "%BUILD_ENVIRONMENT%"=="" (
cd test/
python ..\\ci_scripts\\download_image.py %IMAGE_COMMIT_TAG%.7z
7z x %IMAGE_COMMIT_TAG%.7z
cd ..
cd %TMP_DIR_WIN%\\build
python %TMP_DIR_WIN%\\ci_scripts\\download_image.py %TMP_DIR_WIN%\\%IMAGE_COMMIT_TAG%.7z
:: 7z: `-aos` skips if exists because this .bat can be called multiple times
7z x %TMP_DIR_WIN%\\%IMAGE_COMMIT_TAG%.7z -aos
cd %WORKING_DIR%
) else (
xcopy /s %CONDA_PARENT_DIR%\\Miniconda3\\Lib\\site-packages\\torch .\\test\\torch\\
xcopy /s %CONDA_PARENT_DIR%\\Miniconda3\\Lib\\site-packages\\torch %TMP_DIR_WIN%\\build\\torch\\
)
EOL
cat >ci_scripts/test_python_nn.bat <<EOL
call ci_scripts/setup_pytorch_env.bat
cat >$TMP_DIR/ci_scripts/test_python_nn.bat <<EOL
call %TMP_DIR%/ci_scripts/setup_pytorch_env.bat
:: Some smoke tests
cd test/
:: Checking that caffe2.python is available
@@ -105,13 +109,13 @@ cd ..
cd test/ && python run_test.py --include nn --verbose && cd ..
EOL
cat >ci_scripts/test_python_all_except_nn.bat <<EOL
call ci_scripts/setup_pytorch_env.bat
cat >$TMP_DIR/ci_scripts/test_python_all_except_nn.bat <<EOL
call %TMP_DIR%/ci_scripts/setup_pytorch_env.bat
cd test/ && python run_test.py --exclude nn --verbose && cd ..
EOL
cat >ci_scripts/test_custom_script_ops.bat <<EOL
call ci_scripts/setup_pytorch_env.bat
cat >$TMP_DIR/ci_scripts/test_custom_script_ops.bat <<EOL
call %TMP_DIR%/ci_scripts/setup_pytorch_env.bat
cd test/custom_operator
@@ -119,7 +123,7 @@ cd test/custom_operator
mkdir build
cd build
:: Note: Caffe2 does not support MSVC + CUDA + Debug mode (has to be Release mode)
cmake -DCMAKE_PREFIX_PATH=%CD%\\..\\..\\torch -DCMAKE_BUILD_TYPE=Release -GNinja ..
cmake -DCMAKE_PREFIX_PATH=%TMP_DIR_WIN%\\build\\torch -DCMAKE_BUILD_TYPE=Release -GNinja ..
ninja -v
cd ..
@@ -128,31 +132,36 @@ python test_custom_ops.py -v
python model.py --export-script-module="build/model.pt"
:: Run tests C++-side and load the exported script module.
cd build
set PATH=C:\\Program Files\\NVIDIA Corporation\\NvToolsExt/bin/x64;%CD%\\..\\..\\torch\\lib;%PATH%
set PATH=C:\\Program Files\\NVIDIA Corporation\\NvToolsExt/bin/x64;%TMP_DIR_WIN%\\build\\torch\\lib;%PATH%
test_custom_ops.exe model.pt
EOL
cat >ci_scripts/test_libtorch.bat <<EOL
call ci_scripts/setup_pytorch_env.bat
cat >$TMP_DIR/ci_scripts/test_libtorch.bat <<EOL
call %TMP_DIR%/ci_scripts/setup_pytorch_env.bat
dir
dir %CD%\\test
dir %CD%\\test\\torch
dir %CD%\\test\\torch\\lib
cd %CD%\\test\\torch\\lib
set PATH=C:\\Program Files\\NVIDIA Corporation\\NvToolsExt/bin/x64;%CD%\\..\\..\\torch\\lib;%PATH%
dir %TMP_DIR_WIN%\\build
dir %TMP_DIR_WIN%\\build\\torch
dir %TMP_DIR_WIN%\\build\\torch\\lib
cd %TMP_DIR_WIN%\\build\\torch\\lib
set PATH=C:\\Program Files\\NVIDIA Corporation\\NvToolsExt/bin/x64;%TMP_DIR_WIN%\\build\\torch\\lib;%PATH%
test_api.exe --gtest_filter="-IntegrationTest.MNIST*"
EOL
run_tests() {
if [ -z "${JOB_BASE_NAME}" ] || [[ "${JOB_BASE_NAME}" == *-test ]]; then
ci_scripts/test_python_nn.bat && ci_scripts/test_python_all_except_nn.bat && ci_scripts/test_custom_script_ops.bat && ci_scripts/test_libtorch.bat
$TMP_DIR/ci_scripts/test_python_nn.bat && \
$TMP_DIR/ci_scripts/test_python_all_except_nn.bat && \
$TMP_DIR/ci_scripts/test_custom_script_ops.bat && \
$TMP_DIR/ci_scripts/test_libtorch.bat
else
if [[ "${JOB_BASE_NAME}" == *-test1 ]]; then
ci_scripts/test_python_nn.bat
$TMP_DIR/ci_scripts/test_python_nn.bat
elif [[ "${JOB_BASE_NAME}" == *-test2 ]]; then
ci_scripts/test_python_all_except_nn.bat && ci_scripts/test_custom_script_ops.bat && ci_scripts/test_libtorch.bat
$TMP_DIR/ci_scripts/test_python_all_except_nn.bat && \
$TMP_DIR/ci_scripts/test_custom_script_ops.bat && \
$TMP_DIR/ci_scripts/test_libtorch.bat
fi
fi
}
run_tests && echo "TEST PASSED"
run_tests && assert_git_not_dirty && echo "TEST PASSED"