diff --git a/.ci/caffe2/test.sh b/.ci/caffe2/test.sh
index eaef1e3ebf88..7d1ce2fb4fa1 100755
--- a/.ci/caffe2/test.sh
+++ b/.ci/caffe2/test.sh
@@ -5,7 +5,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
 
 if [[ ${BUILD_ENVIRONMENT} == *onnx* ]]; then
   pip install click mock tabulate networkx==2.0
-  pip -q install --user "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
+  pip -q install "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
 fi
 
 # Skip tests in environments where they are not built/applicable
@@ -147,8 +147,8 @@ export DNNL_MAX_CPU_ISA=AVX2
 if [[ "${SHARD_NUMBER:-1}" == "1" ]]; then
   # TODO(sdym@meta.com) remove this when the linked issue resolved.
   # py is temporary until https://github.com/Teemu/pytest-sugar/issues/241 is fixed
-  pip install --user py==1.11.0
-  pip install --user pytest-sugar
+  pip install py==1.11.0
+  pip install pytest-sugar
   # NB: Warnings are disabled because they make it harder to see what
   # the actual erroring test is
   "$PYTHON" \
diff --git a/.ci/onnx/test.sh b/.ci/onnx/test.sh
index a7d3b72c62a7..d42ca2c218de 100755
--- a/.ci/onnx/test.sh
+++ b/.ci/onnx/test.sh
@@ -19,7 +19,7 @@ git config --global --add safe.directory /var/lib/jenkins/workspace
 
 if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
   # TODO: This can be removed later once vision is also part of the Docker image
-  pip install -q --user --no-use-pep517 "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)"
+  pip install -q --no-use-pep517 "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)"
   # JIT C++ extensions require ninja, so put it into PATH.
   export PATH="/var/lib/jenkins/.local/bin:$PATH"
   # NB: ONNX test is fast (~15m) so it's ok to retry it few more times to avoid any flaky issue, we
diff --git a/.ci/pytorch/common_utils.sh b/.ci/pytorch/common_utils.sh
index 9c0e5242f433..3dbc2ece9e70 100644
--- a/.ci/pytorch/common_utils.sh
+++ b/.ci/pytorch/common_utils.sh
@@ -127,9 +127,9 @@ function install_torchaudio() {
   if [[ "$1" == "cuda" ]]; then
     # TODO: This is better to be passed as a parameter from _linux-test workflow
     # so that it can be consistent with what is set in build
-    TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}"
+    TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install --no-use-pep517 "git+https://github.com/pytorch/audio.git@${commit}"
   else
-    pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}"
+    pip_install --no-use-pep517 "git+https://github.com/pytorch/audio.git@${commit}"
   fi
 }
 
@@ -139,8 +139,8 @@ function install_torchtext() {
   local text_commit
   data_commit=$(get_pinned_commit data)
   text_commit=$(get_pinned_commit text)
-  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/data.git@${data_commit}"
-  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/text.git@${text_commit}"
+  pip_install --no-use-pep517 "git+https://github.com/pytorch/data.git@${data_commit}"
+  pip_install --no-use-pep517 "git+https://github.com/pytorch/text.git@${text_commit}"
 }
 
 function install_torchvision() {
@@ -153,7 +153,7 @@ function install_torchvision() {
     echo 'char* dlerror(void) { return "";}'|gcc -fpic -shared -o "${HOME}/dlerror.so" -x c -
     LD_PRELOAD=${orig_preload}:${HOME}/dlerror.so
   fi
-  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/vision.git@${commit}"
+  pip_install --no-use-pep517 "git+https://github.com/pytorch/vision.git@${commit}"
   if [ -n "${LD_PRELOAD}" ]; then
     LD_PRELOAD=${orig_preload}
   fi
@@ -173,7 +173,7 @@ function install_torchrec_and_fbgemm() {
 
   if [[ "$BUILD_ENVIRONMENT" == *rocm* ]] ; then
     # install torchrec first because it installs fbgemm nightly on top of rocm fbgemm
-    pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
+    pip_install --no-use-pep517 "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
     pip_uninstall fbgemm-gpu-nightly
 
     pip_install tabulate  # needed for newer fbgemm
@@ -190,8 +190,8 @@ function install_torchrec_and_fbgemm() {
     rm -rf fbgemm
   else
     # See https://github.com/pytorch/pytorch/issues/106971
-    CUDA_PATH=/usr/local/cuda-12.1 pip_install --no-use-pep517 --user "git+https://github.com/pytorch/FBGEMM.git@${fbgemm_commit}#egg=fbgemm-gpu&subdirectory=fbgemm_gpu"
-    pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
+    CUDA_PATH=/usr/local/cuda-12.1 pip_install --no-use-pep517 "git+https://github.com/pytorch/FBGEMM.git@${fbgemm_commit}#egg=fbgemm-gpu&subdirectory=fbgemm_gpu"
+    pip_install --no-use-pep517 "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
   fi
 }
 
@@ -234,7 +234,7 @@ function checkout_install_torchbench() {
 function install_torchao() {
   local commit
   commit=$(get_pinned_commit torchao)
-  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/ao.git@${commit}"
+  pip_install --no-use-pep517 "git+https://github.com/pytorch/ao.git@${commit}"
 }
 
 function print_sccache_stats() {
diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh
index 7b7e1970f72e..77004a176485 100755
--- a/.ci/pytorch/test.sh
+++ b/.ci/pytorch/test.sh
@@ -201,7 +201,7 @@ fi
 
 if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
   # JIT C++ extensions require ninja.
-  pip_install --user "ninja==1.10.2"
+  pip_install "ninja==1.10.2"
   # ninja is installed in $HOME/.local/bin, e.g., /var/lib/jenkins/.local/bin for CI user jenkins
   # but this script should be runnable by any user, including root
   export PATH="$HOME/.local/bin:$PATH"
@@ -496,7 +496,7 @@ DYNAMO_BENCHMARK_FLAGS=()
 
 pr_time_benchmarks() {
-  pip_install --user "fbscribelogger"
+  pip_install "fbscribelogger"
 
   TEST_REPORTS_DIR=$(pwd)/test/test-reports
   mkdir -p "$TEST_REPORTS_DIR"
@@ -1471,8 +1471,8 @@ test_bazel() {
 
 test_benchmarks() {
   if [[ "$BUILD_ENVIRONMENT" == *cuda* && $TEST_CONFIG != *nogpu* ]]; then
-    pip_install --user "pytest-benchmark==3.2.3"
-    pip_install --user "requests"
+    pip_install "pytest-benchmark==3.2.3"
+    pip_install "requests"
     BENCHMARK_DATA="benchmarks/.data"
     mkdir -p ${BENCHMARK_DATA}
     pytest benchmarks/fastrnns/test_bench.py --benchmark-sort=Name --benchmark-json=${BENCHMARK_DATA}/fastrnns_default.json --fuser=default --executor=default