Don't globally disable any ShellCheck warnings (#55165)

Summary:
https://github.com/pytorch/pytorch/issues/47786 updated ShellCheck and fixed the warnings that it was already giving in CI (since it previously didn't cause the job to fail). https://github.com/pytorch/pytorch/issues/54069 enabled two ShellCheck warnings that previously were globally disabled. This PR continues the trend by reenabling the remaining four ShellCheck warnings that previously were globally disabled.

Also, this PR puts as many remaining ShellCheck arguments as possible into `.shellcheckrc` to make it easier to integrate with editors. For instance, in VS Code, this is now all that is needed (due to https://github.com/koalaman/shellcheck/issues/1818 and the fact that VS Code only runs ShellCheck on one file at a time):

```json
{
  "shellcheck.customArgs": [
    "--external-sources"
  ]
}
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/55165

Test Plan:
[The "Lint / quick-checks" job in GitHub Actions](https://github.com/pytorch/pytorch/pull/55165/checks?check_run_id=2250098330), or this command if you want to check locally:
```
.jenkins/run-shellcheck.sh
```

Reviewed By: walterddr

Differential Revision: D27514119

Pulled By: samestep

fbshipit-source-id: f00744b2cb90a2ab9aa05957bff32852485a351f
This commit is contained in:
Sam Estep
2021-04-02 10:40:12 -07:00
committed by Facebook GitHub Bot
parent 978fca64a6
commit 09670c7d43
23 changed files with 121 additions and 101 deletions

View File

@@ -1,4 +1,4 @@
disable=SC2086
disable=SC2155
disable=SC2164
disable=SC1003
source-path=SCRIPTDIR
# we'd like to enable --external-sources here but can't
# https://github.com/koalaman/shellcheck/issues/1818

View File

@@ -14,7 +14,8 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
echo "Clang version:"
clang --version
export LLVM_DIR="$(llvm-config-5.0 --prefix)"
LLVM_DIR="$(llvm-config-5.0 --prefix)"
export LLVM_DIR
echo "LLVM_DIR: ${LLVM_DIR}"
time ANALYZE_TEST=1 CHECK_RESULT=1 tools/code_analyzer/build.sh

View File

@@ -27,7 +27,8 @@ retry pip install --pre torch torchvision \
if [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-static* ]]; then
TEST_CUSTOM_BUILD_STATIC=1 test/mobile/custom_build/build.sh
elif [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-dynamic* ]]; then
export LLVM_DIR="$(llvm-config-5.0 --prefix)"
LLVM_DIR="$(llvm-config-5.0 --prefix)"
export LLVM_DIR
echo "LLVM_DIR: ${LLVM_DIR}"
TEST_CUSTOM_BUILD_DYNAMIC=1 test/mobile/custom_build/build.sh
else

View File

@@ -281,7 +281,7 @@ else
BUILD_LIBTORCH_PY=$PWD/tools/build_libtorch.py
mkdir -p ../cpp-build/caffe2
pushd ../cpp-build/caffe2
WERROR=1 VERBOSE=1 DEBUG=1 python $BUILD_LIBTORCH_PY
WERROR=1 VERBOSE=1 DEBUG=1 python "$BUILD_LIBTORCH_PY"
popd
fi
fi
@@ -309,10 +309,11 @@ if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
exit 1
fi
bazels3cache --bucket=${XLA_CLANG_CACHE_S3_BUCKET_NAME} --maxEntrySizeBytes=0
bazels3cache --bucket="${XLA_CLANG_CACHE_S3_BUCKET_NAME}" --maxEntrySizeBytes=0
pushd xla
export CC=clang-9 CXX=clang++-9
# Use cloud cache to build when available.
# shellcheck disable=SC1003
sed -i '/bazel build/ a --remote_http_cache=http://localhost:7777 \\' build_torch_xla_libs.sh
python setup.py install

View File

@@ -16,7 +16,7 @@ fi
# The way this is done is by detecting the command of the parent pid of the current process and checking whether
# that is sccache, and wrapping sccache around the process if its parent were not already sccache.
function write_sccache_stub() {
printf "#!/bin/sh\nif [ \$(ps auxc \$(ps auxc -o ppid \$\$ | grep \$\$ | rev | cut -d' ' -f1 | rev) | tr '\\\\n' ' ' | rev | cut -d' ' -f2 | rev) != sccache ]; then\n exec sccache %s \"\$@\"\nelse\n exec %s \"\$@\"\nfi" "$(which $1)" "$(which $1)" > "${WORKSPACE_DIR}/$1"
printf "#!/bin/sh\nif [ \$(ps auxc \$(ps auxc -o ppid \$\$ | grep \$\$ | rev | cut -d' ' -f1 | rev) | tr '\\\\n' ' ' | rev | cut -d' ' -f2 | rev) != sccache ]; then\n exec sccache %s \"\$@\"\nelse\n exec %s \"\$@\"\nfi" "$(which "$1")" "$(which "$1")" > "${WORKSPACE_DIR}/$1"
chmod a+x "${WORKSPACE_DIR}/$1"
}
@@ -38,6 +38,6 @@ assert_git_not_dirty
# Upload torch binaries when the build job is finished
if [ -z "${IN_CI}" ]; then
7z a ${IMAGE_COMMIT_TAG}.7z ${WORKSPACE_DIR}/miniconda3/lib/python3.6/site-packages/torch*
aws s3 cp ${IMAGE_COMMIT_TAG}.7z s3://ossci-macos-build/pytorch/${IMAGE_COMMIT_TAG}.7z --acl public-read
7z a "${IMAGE_COMMIT_TAG}".7z "${WORKSPACE_DIR}"/miniconda3/lib/python3.6/site-packages/torch*
aws s3 cp "${IMAGE_COMMIT_TAG}".7z s3://ossci-macos-build/pytorch/"${IMAGE_COMMIT_TAG}".7z --acl public-read
fi

View File

@@ -11,7 +11,7 @@ COMPACT_JOB_NAME="${BUILD_ENVIRONMENT}"
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
export PATH="/usr/local/bin:$PATH"
export WORKSPACE_DIR="${HOME}/workspace"
mkdir -p ${WORKSPACE_DIR}
mkdir -p "${WORKSPACE_DIR}"
if [[ "${COMPACT_JOB_NAME}" == *arm64* ]]; then
MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-py38_4.9.2-MacOSX-x86_64.sh"
@@ -21,13 +21,13 @@ fi
# If a local installation of conda doesn't exist, we download and install conda
if [ ! -d "${WORKSPACE_DIR}/miniconda3" ]; then
mkdir -p ${WORKSPACE_DIR}
curl --retry 3 ${MINICONDA_URL} -o ${WORKSPACE_DIR}/miniconda3.sh
retry bash ${WORKSPACE_DIR}/miniconda3.sh -b -p ${WORKSPACE_DIR}/miniconda3
mkdir -p "${WORKSPACE_DIR}"
curl --retry 3 ${MINICONDA_URL} -o "${WORKSPACE_DIR}"/miniconda3.sh
retry bash "${WORKSPACE_DIR}"/miniconda3.sh -b -p "${WORKSPACE_DIR}"/miniconda3
fi
export PATH="${WORKSPACE_DIR}/miniconda3/bin:$PATH"
# shellcheck disable=SC1090
source ${WORKSPACE_DIR}/miniconda3/bin/activate
source "${WORKSPACE_DIR}"/miniconda3/bin/activate
retry conda install -y mkl mkl-include numpy=1.18.5 pyyaml=5.3 setuptools=46.0.0 cmake cffi ninja typing_extensions dataclasses pip
# The torch.hub tests make requests to GitHub.
#

View File

@@ -13,7 +13,7 @@ pip install -q hypothesis "librosa>=0.6.2" "numba<=0.49.1" psutil
pip install unittest-xml-reporting pytest
if [ -z "${IN_CI}" ]; then
rm -rf ${WORKSPACE_DIR}/miniconda3/lib/python3.6/site-packages/torch*
rm -rf "${WORKSPACE_DIR}"/miniconda3/lib/python3.6/site-packages/torch*
fi
export CMAKE_PREFIX_PATH=${WORKSPACE_DIR}/miniconda3/
@@ -31,9 +31,9 @@ fi
# Download torch binaries in the test jobs
if [ -z "${IN_CI}" ]; then
rm -rf ${WORKSPACE_DIR}/miniconda3/lib/python3.6/site-packages/torch*
aws s3 cp s3://ossci-macos-build/pytorch/${IMAGE_COMMIT_TAG}.7z ${IMAGE_COMMIT_TAG}.7z
7z x ${IMAGE_COMMIT_TAG}.7z -o"${WORKSPACE_DIR}/miniconda3/lib/python3.6/site-packages"
rm -rf "${WORKSPACE_DIR}"/miniconda3/lib/python3.6/site-packages/torch*
aws s3 cp s3://ossci-macos-build/pytorch/"${IMAGE_COMMIT_TAG}".7z "${IMAGE_COMMIT_TAG}".7z
7z x "${IMAGE_COMMIT_TAG}".7z -o"${WORKSPACE_DIR}/miniconda3/lib/python3.6/site-packages"
fi
# Test that OpenMP is enabled
@@ -75,12 +75,12 @@ test_libtorch() {
echo "Testing libtorch"
CPP_BUILD="$PWD/../cpp-build"
rm -rf $CPP_BUILD
mkdir -p $CPP_BUILD/caffe2
rm -rf "$CPP_BUILD"
mkdir -p "$CPP_BUILD"/caffe2
BUILD_LIBTORCH_PY=$PWD/tools/build_libtorch.py
pushd $CPP_BUILD/caffe2
VERBOSE=1 DEBUG=1 python $BUILD_LIBTORCH_PY
pushd "$CPP_BUILD"/caffe2
VERBOSE=1 DEBUG=1 python "$BUILD_LIBTORCH_PY"
popd
python tools/download_mnist.py --quiet -d test/cpp/api/mnist

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -e
run_test () {
rm -rf test_tmp/ && mkdir test_tmp/ && cd test_tmp/
@@ -17,5 +18,5 @@ get_runtime_of_command () {
runtime=${runtime#+++ $@}
runtime=$(python -c "print($runtime)")
echo $runtime
echo "$runtime"
}

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -e
. ./common.sh
@@ -28,12 +29,12 @@ test_cpu_speed_mini_sequence_labeler () {
stats=$(python ../get_stats.py "${SAMPLE_ARRAY[@]}")
echo "Runtime stats in seconds:"
echo $stats
echo "$stats"
if [ "$2" == "compare_with_baseline" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}"
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}"
elif [ "$2" == "compare_and_update" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}" --update
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}" --update
fi
}

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -e
. ./common.sh
@@ -22,7 +23,7 @@ test_cpu_speed_mnist () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python main.py --epochs 1 --no-log)
echo $runtime
echo "$runtime"
SAMPLE_ARRAY+=("${runtime}")
done
@@ -30,12 +31,12 @@ test_cpu_speed_mnist () {
stats=$(python ../get_stats.py "${SAMPLE_ARRAY[@]}")
echo "Runtime stats in seconds:"
echo $stats
echo "$stats"
if [ "$2" == "compare_with_baseline" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}"
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}"
elif [ "$2" == "compare_and_update" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}" --update
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}" --update
fi
}

View File

@@ -11,14 +11,14 @@ test_cpu_speed_torch () {
git clone https://github.com/yf225/perf-tests.git
if [ "$1" == "compare_with_baseline" ]; then
export ARGS="--compare ../cpu_runtime.json"
export ARGS=(--compare ../cpu_runtime.json)
elif [ "$1" == "compare_and_update" ]; then
export ARGS="--compare ../cpu_runtime.json --update ../new_cpu_runtime.json"
export ARGS=(--compare ../cpu_runtime.json --update ../new_cpu_runtime.json)
elif [ "$1" == "update_only" ]; then
export ARGS="--update ../new_cpu_runtime.json"
export ARGS=(--update ../new_cpu_runtime.json)
fi
if ! python perf-tests/modules/test_cpu_torch.py ${ARGS}; then
if ! python perf-tests/modules/test_cpu_torch.py "${ARGS[@]}"; then
echo "To reproduce this regression, run \`cd .jenkins/pytorch/perf_test/ && bash ${FUNCNAME[0]}.sh\` on your local machine and compare the runtime before/after your code change."
exit 1
fi

View File

@@ -11,14 +11,14 @@ test_cpu_speed_torch_tensor () {
git clone https://github.com/yf225/perf-tests.git
if [ "$1" == "compare_with_baseline" ]; then
export ARGS="--compare ../cpu_runtime.json"
export ARGS=(--compare ../cpu_runtime.json)
elif [ "$1" == "compare_and_update" ]; then
export ARGS="--compare ../cpu_runtime.json --update ../new_cpu_runtime.json"
export ARGS=(--compare ../cpu_runtime.json --update ../new_cpu_runtime.json)
elif [ "$1" == "update_only" ]; then
export ARGS="--update ../new_cpu_runtime.json"
export ARGS=(--update ../new_cpu_runtime.json)
fi
if ! python perf-tests/modules/test_cpu_torch_tensor.py ${ARGS}; then
if ! python perf-tests/modules/test_cpu_torch_tensor.py "${ARGS[@]}"; then
echo "To reproduce this regression, run \`cd .jenkins/pytorch/perf_test/ && bash ${FUNCNAME[0]}.sh\` on your local machine and compare the runtime before/after your code change."
exit 1
fi

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -e
. ./common.sh
@@ -21,7 +22,7 @@ test_gpu_speed_cudnn_lstm () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python cudnn_lstm.py --skip-cpu-governor-check)
echo $runtime
echo "$runtime"
SAMPLE_ARRAY+=("${runtime}")
done
@@ -29,12 +30,12 @@ test_gpu_speed_cudnn_lstm () {
stats=$(python ../get_stats.py "${SAMPLE_ARRAY[@]}")
echo "Runtime stats in seconds:"
echo $stats
echo "$stats"
if [ "$2" == "compare_with_baseline" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}"
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}"
elif [ "$2" == "compare_and_update" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}" --update
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}" --update
fi
}

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -e
. ./common.sh
@@ -21,7 +22,7 @@ test_gpu_speed_lstm () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python lstm.py --skip-cpu-governor-check)
echo $runtime
echo "$runtime"
SAMPLE_ARRAY+=("${runtime}")
done
@@ -29,12 +30,12 @@ test_gpu_speed_lstm () {
stats=$(python ../get_stats.py "${SAMPLE_ARRAY[@]}")
echo "Runtime stats in seconds:"
echo $stats
echo "$stats"
if [ "$2" == "compare_with_baseline" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}"
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}"
elif [ "$2" == "compare_and_update" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}" --update
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}" --update
fi
}

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -e
. ./common.sh
@@ -21,7 +22,7 @@ test_gpu_speed_mlstm () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python mlstm.py --skip-cpu-governor-check)
echo $runtime
echo "$runtime"
SAMPLE_ARRAY+=("${runtime}")
done
@@ -29,12 +30,12 @@ test_gpu_speed_mlstm () {
stats=$(python ../get_stats.py "${SAMPLE_ARRAY[@]}")
echo "Runtime stats in seconds:"
echo $stats
echo "$stats"
if [ "$2" == "compare_with_baseline" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}"
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}"
elif [ "$2" == "compare_and_update" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}" --update
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}" --update
fi
}

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -e
. ./common.sh
@@ -25,7 +26,7 @@ test_gpu_speed_mnist () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python main.py --epochs 1 --no-log)
echo $runtime
echo "$runtime"
SAMPLE_ARRAY+=("${runtime}")
done
@@ -33,12 +34,12 @@ test_gpu_speed_mnist () {
stats=$(python ../get_stats.py "${SAMPLE_ARRAY[@]}")
echo "Runtime stats in seconds:"
echo $stats
echo "$stats"
if [ "$2" == "compare_with_baseline" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}"
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}"
elif [ "$2" == "compare_and_update" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}" --update
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}" --update
fi
}

View File

@@ -1,4 +1,5 @@
#!/bin/bash
set -e
. ./common.sh
@@ -30,7 +31,7 @@ test_gpu_speed_word_language_model () {
for (( i=1; i<=NUM_RUNS; i++ )) do
runtime=$(get_runtime_of_command python main.py --cuda --epochs 1)
echo $runtime
echo "$runtime"
SAMPLE_ARRAY+=("${runtime}")
done
@@ -38,12 +39,12 @@ test_gpu_speed_word_language_model () {
stats=$(python ../get_stats.py "${SAMPLE_ARRAY[@]}")
echo "Runtime stats in seconds:"
echo $stats
echo "$stats"
if [ "$2" == "compare_with_baseline" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}"
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}"
elif [ "$2" == "compare_and_update" ]; then
python ../compare_with_baseline.py --test-name ${FUNCNAME[0]} --sample-stats "${stats}" --update
python ../compare_with_baseline.py --test-name "${FUNCNAME[0]}" --sample-stats "${stats}" --update
fi
}

View File

@@ -20,7 +20,8 @@ aws configure set default.s3.multipart_threshold 5GB
if [[ "$COMMIT_SOURCE" == master ]]; then
# Get current master commit hash
export MASTER_COMMIT_ID=$(git log --format="%H" -n 1)
MASTER_COMMIT_ID=$(git log --format="%H" -n 1)
export MASTER_COMMIT_ID
fi
# Find the master commit to test against
@@ -28,17 +29,17 @@ git remote add upstream https://github.com/pytorch/pytorch.git
git fetch upstream
IFS=$'\n'
while IFS='' read -r commit_id; do
if aws s3 ls s3://ossci-perf-test/pytorch/cpu_runtime/${commit_id}.json; then
if aws s3 ls s3://ossci-perf-test/pytorch/cpu_runtime/"${commit_id}".json; then
LATEST_TESTED_COMMIT=${commit_id}
break
fi
done < <(git rev-list upstream/master)
aws s3 cp s3://ossci-perf-test/pytorch/cpu_runtime/${LATEST_TESTED_COMMIT}.json cpu_runtime.json
aws s3 cp s3://ossci-perf-test/pytorch/cpu_runtime/"${LATEST_TESTED_COMMIT}".json cpu_runtime.json
if [[ "$COMMIT_SOURCE" == master ]]; then
# Prepare new baseline file
cp cpu_runtime.json new_cpu_runtime.json
python update_commit_hash.py new_cpu_runtime.json ${MASTER_COMMIT_ID}
python update_commit_hash.py new_cpu_runtime.json "${MASTER_COMMIT_ID}"
fi
# Include tests
@@ -68,5 +69,5 @@ run_test test_cpu_speed_mnist 20 ${TEST_MODE}
if [[ "$COMMIT_SOURCE" == master ]]; then
# This could cause race condition if we are testing the same master commit twice,
# but the chance of them executing this line at the same time is low.
aws s3 cp new_cpu_runtime.json s3://ossci-perf-test/pytorch/cpu_runtime/${MASTER_COMMIT_ID}.json --acl public-read
aws s3 cp new_cpu_runtime.json s3://ossci-perf-test/pytorch/cpu_runtime/"${MASTER_COMMIT_ID}".json --acl public-read
fi

View File

@@ -20,7 +20,8 @@ aws configure set default.s3.multipart_threshold 5GB
if [[ "$COMMIT_SOURCE" == master ]]; then
# Get current master commit hash
export MASTER_COMMIT_ID=$(git log --format="%H" -n 1)
MASTER_COMMIT_ID=$(git log --format="%H" -n 1)
export MASTER_COMMIT_ID
fi
# Find the master commit to test against
@@ -28,17 +29,17 @@ git remote add upstream https://github.com/pytorch/pytorch.git
git fetch upstream
IFS=$'\n'
while IFS='' read -r commit_id; do
if aws s3 ls s3://ossci-perf-test/pytorch/gpu_runtime/${commit_id}.json; then
if aws s3 ls s3://ossci-perf-test/pytorch/gpu_runtime/"${commit_id}".json; then
LATEST_TESTED_COMMIT=${commit_id}
break
fi
done < <(git rev-list upstream/master)
aws s3 cp s3://ossci-perf-test/pytorch/gpu_runtime/${LATEST_TESTED_COMMIT}.json gpu_runtime.json
aws s3 cp s3://ossci-perf-test/pytorch/gpu_runtime/"${LATEST_TESTED_COMMIT}".json gpu_runtime.json
if [[ "$COMMIT_SOURCE" == master ]]; then
# Prepare new baseline file
cp gpu_runtime.json new_gpu_runtime.json
python update_commit_hash.py new_gpu_runtime.json ${MASTER_COMMIT_ID}
python update_commit_hash.py new_gpu_runtime.json "${MASTER_COMMIT_ID}"
fi
# Include tests
@@ -71,7 +72,7 @@ fi
if [[ "$COMMIT_SOURCE" == master ]]; then
# This could cause race condition if we are testing the same master commit twice,
# but the chance of them executing this line at the same time is low.
aws s3 cp new_gpu_runtime.json s3://ossci-perf-test/pytorch/gpu_runtime/${MASTER_COMMIT_ID}.json --acl public-read
aws s3 cp new_gpu_runtime.json s3://ossci-perf-test/pytorch/gpu_runtime/"${MASTER_COMMIT_ID}".json --acl public-read
fi
popd

View File

@@ -175,7 +175,7 @@ test_without_numpy() {
# which transitively includes tbb.h which is not available!
if [[ "${BUILD_ENVIRONMENT}" == *tbb* ]]; then
sudo mkdir -p /usr/include/tbb
sudo cp -r $PWD/third_party/tbb/include/tbb/* /usr/include/tbb
sudo cp -r "$PWD"/third_party/tbb/include/tbb/* /usr/include/tbb
fi
test_libtorch() {
@@ -387,7 +387,7 @@ test_vec256() {
vec256_tests=$(find . -maxdepth 1 -executable -name 'vec256_test*')
for vec256_exec in $vec256_tests
do
$vec256_exec --gtest_output=xml:test/test-reports/vec256/$vec256_exec.xml
$vec256_exec --gtest_output=xml:test/test-reports/vec256/"$vec256_exec".xml
done
popd
assert_git_not_dirty

View File

@@ -16,14 +16,16 @@ SCRIPT_PARENT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# shellcheck source=./common.sh
source "$SCRIPT_PARENT_DIR/common.sh"
export IMAGE_COMMIT_ID=$(git rev-parse HEAD)
IMAGE_COMMIT_ID=$(git rev-parse HEAD)
export IMAGE_COMMIT_ID
export IMAGE_COMMIT_TAG=${BUILD_ENVIRONMENT}-${IMAGE_COMMIT_ID}
if [[ ${JOB_NAME} == *"develop"* ]]; then
export IMAGE_COMMIT_TAG=develop-${IMAGE_COMMIT_TAG}
fi
export TMP_DIR="${PWD}/build/win_tmp"
export TMP_DIR_WIN=$(cygpath -w "${TMP_DIR}")
TMP_DIR_WIN=$(cygpath -w "${TMP_DIR}")
export TMP_DIR_WIN
export PYTORCH_FINAL_PACKAGE_DIR="/c/w/build-results"
if [[ -n "$PYTORCH_FINAL_PACKAGE_DIR" ]]; then
mkdir -p "$PYTORCH_FINAL_PACKAGE_DIR" || true
@@ -31,10 +33,10 @@ fi
# This directory is used only to hold "pytorch_env_restore.bat", called via "setup_pytorch_env.bat"
CI_SCRIPTS_DIR=$TMP_DIR/ci_scripts
mkdir -p $CI_SCRIPTS_DIR
mkdir -p "$CI_SCRIPTS_DIR"
if [ -n "$(ls $CI_SCRIPTS_DIR/*)" ]; then
rm $CI_SCRIPTS_DIR/*
if [ -n "$(ls "$CI_SCRIPTS_DIR"/*)" ]; then
rm "$CI_SCRIPTS_DIR"/*
fi
export SCRIPT_HELPERS_DIR=$SCRIPT_PARENT_DIR/win-test-helpers
@@ -54,11 +56,11 @@ if [[ $PYLONG_API_CHECK == 0 ]]; then
fi
set -ex
$SCRIPT_HELPERS_DIR/build_pytorch.bat
"$SCRIPT_HELPERS_DIR"/build_pytorch.bat
assert_git_not_dirty
if [ ! -f ${TMP_DIR}/${IMAGE_COMMIT_TAG}.7z ] && [ ! ${BUILD_ENVIRONMENT} == "" ]; then
if [ ! -f "${TMP_DIR}"/"${IMAGE_COMMIT_TAG}".7z ] && [ ! "${BUILD_ENVIRONMENT}" == "" ]; then
exit 1
fi
echo "BUILD PASSED"

View File

@@ -7,31 +7,36 @@ SCRIPT_PARENT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# shellcheck source=./common.sh
source "$SCRIPT_PARENT_DIR/common.sh"
export IMAGE_COMMIT_ID=$(git rev-parse HEAD)
IMAGE_COMMIT_ID=$(git rev-parse HEAD)
export IMAGE_COMMIT_ID
export IMAGE_COMMIT_TAG=${BUILD_ENVIRONMENT}-${IMAGE_COMMIT_ID}
if [[ ${JOB_NAME} == *"develop"* ]]; then
export IMAGE_COMMIT_TAG=develop-${IMAGE_COMMIT_TAG}
fi
export TMP_DIR="${PWD}/build/win_tmp"
export TMP_DIR_WIN=$(cygpath -w "${TMP_DIR}")
TMP_DIR_WIN=$(cygpath -w "${TMP_DIR}")
export TMP_DIR_WIN
export PROJECT_DIR="${PWD}"
export PROJECT_DIR_WIN=$(cygpath -w "${PROJECT_DIR}")
PROJECT_DIR_WIN=$(cygpath -w "${PROJECT_DIR}")
export PROJECT_DIR_WIN
export TEST_DIR="${PWD}/test"
export TEST_DIR_WIN=$(cygpath -w "${TEST_DIR}")
TEST_DIR_WIN=$(cygpath -w "${TEST_DIR}")
export TEST_DIR_WIN
export PYTORCH_FINAL_PACKAGE_DIR="/c/users/circleci/workspace/build-results"
export PYTORCH_FINAL_PACKAGE_DIR_WIN=$(cygpath -w "${PYTORCH_FINAL_PACKAGE_DIR}")
PYTORCH_FINAL_PACKAGE_DIR_WIN=$(cygpath -w "${PYTORCH_FINAL_PACKAGE_DIR}")
export PYTORCH_FINAL_PACKAGE_DIR_WIN
export PYTORCH_TEST_SKIP_NOARCH=1
mkdir -p $TMP_DIR/build/torch
mkdir -p "$TMP_DIR"/build/torch
# This directory is used only to hold "pytorch_env_restore.bat", called via "setup_pytorch_env.bat"
CI_SCRIPTS_DIR=$TMP_DIR/ci_scripts
mkdir -p $CI_SCRIPTS_DIR
mkdir -p "$CI_SCRIPTS_DIR"
if [ -n "$(ls $CI_SCRIPTS_DIR/*)" ]; then
rm $CI_SCRIPTS_DIR/*
if [ -n "$(ls "$CI_SCRIPTS_DIR"/*)" ]; then
rm "$CI_SCRIPTS_DIR"/*
fi
@@ -56,22 +61,22 @@ run_tests() {
done
if [ -z "${JOB_BASE_NAME}" ] || [[ "${JOB_BASE_NAME}" == *-test ]]; then
$SCRIPT_HELPERS_DIR/test_python.bat "$DETERMINE_FROM"
$SCRIPT_HELPERS_DIR/test_custom_script_ops.bat
$SCRIPT_HELPERS_DIR/test_custom_backend.bat
$SCRIPT_HELPERS_DIR/test_libtorch.bat
"$SCRIPT_HELPERS_DIR"/test_python.bat "$DETERMINE_FROM"
"$SCRIPT_HELPERS_DIR"/test_custom_script_ops.bat
"$SCRIPT_HELPERS_DIR"/test_custom_backend.bat
"$SCRIPT_HELPERS_DIR"/test_libtorch.bat
else
export PYTORCH_COLLECT_COVERAGE=1
if [[ "${JOB_BASE_NAME}" == *-test1 ]]; then
$SCRIPT_HELPERS_DIR/test_python_first_shard.bat "$DETERMINE_FROM"
$SCRIPT_HELPERS_DIR/test_libtorch.bat
"$SCRIPT_HELPERS_DIR"/test_python_first_shard.bat "$DETERMINE_FROM"
"$SCRIPT_HELPERS_DIR"/test_libtorch.bat
if [[ "${USE_CUDA}" == "1" ]]; then
$SCRIPT_HELPERS_DIR/test_python_jit_legacy.bat "$DETERMINE_FROM"
"$SCRIPT_HELPERS_DIR"/test_python_jit_legacy.bat "$DETERMINE_FROM"
fi
elif [[ "${JOB_BASE_NAME}" == *-test2 ]]; then
$SCRIPT_HELPERS_DIR/test_python_second_shard.bat "$DETERMINE_FROM"
$SCRIPT_HELPERS_DIR/test_custom_backend.bat
$SCRIPT_HELPERS_DIR/test_custom_script_ops.bat
"$SCRIPT_HELPERS_DIR"/test_python_second_shard.bat "$DETERMINE_FROM"
"$SCRIPT_HELPERS_DIR"/test_custom_backend.bat
"$SCRIPT_HELPERS_DIR"/test_custom_script_ops.bat
fi
fi
}
@@ -81,13 +86,13 @@ assert_git_not_dirty
echo "TEST PASSED"
if [[ "${BUILD_ENVIRONMENT}" == "pytorch-win-vs2019-cuda10-cudnn7-py3" ]]; then
pushd $TEST_DIR
pushd "$TEST_DIR"
python -mpip install coverage
echo "Generating XML coverage report"
time python -mcoverage xml
popd
pushd $PROJECT_DIR
pushd "$PROJECT_DIR"
python -mpip install codecov
python -mcodecov
popd

View File

@@ -5,4 +5,4 @@
# .jenkins/run-shellcheck.sh --color=always | less -R
find .jenkins/pytorch -name *.sh | xargs shellcheck --external-sources -P SCRIPTDIR "$@"
find .jenkins/pytorch -name '*.sh' -print0 | xargs -0 -n1 shellcheck --external-sources