Forward fix periodic vision build (#161408)

Trying to forward fix https://github.com/pytorch/pytorch/issues/161358: use the SM 8.0 architecture by default when no GPU is available to detect the compute capability.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/161408
Approved by: https://github.com/zou3519, https://github.com/huydhn

Co-authored-by: Huy Do <huydhn@gmail.com>
atalman
2025-08-25 23:28:19 +00:00
committed by PyTorch MergeBot
parent 2cf7ac2fb7
commit 94b9569c4a
2 changed files with 15 additions and 9 deletions
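
For context (not part of the commit; the output below is illustrative): on a CUDA runner with a working driver, the compute-capability query that the new detect_cuda_arch helper relies on looks roughly like this:

  $ nvidia-smi --query-gpu=compute_cap --format=csv
  compute_cap
  8.0
  $ nvidia-smi --query-gpu=compute_cap --format=csv | tail -n 1
  8.0

tail -n 1 strips the CSV header (and, on multi-GPU hosts, keeps only the last entry), so TORCH_CUDA_ARCH_LIST ends up holding the detected capability. On nogpu runners nvidia-smi is absent, which is why the helper falls back to the 8.0 minimum.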


@@ -149,13 +149,22 @@ function get_pinned_commit() {
   cat .github/ci_commit_pins/"${1}".txt
 }
+function detect_cuda_arch() {
+  if [[ "${BUILD_ENVIRONMENT}" == *cuda* ]]; then
+    if command -v nvidia-smi; then
+      TORCH_CUDA_ARCH_LIST=$(nvidia-smi --query-gpu=compute_cap --format=csv | tail -n 1)
+    elif [[ "${TEST_CONFIG}" == *nogpu* ]]; then
+      # There won't be nvidia-smi in nogpu tests, so just set TORCH_CUDA_ARCH_LIST to the default
+      # minimum supported value here
+      TORCH_CUDA_ARCH_LIST=8.0
+    fi
+    export TORCH_CUDA_ARCH_LIST
+  fi
+}
 function install_torchaudio() {
   local commit
   commit=$(get_pinned_commit audio)
-  if [[ "${BUILD_ENVIRONMENT}" == *cuda* ]] && command -v nvidia-smi; then
-    TORCH_CUDA_ARCH_LIST=$(nvidia-smi --query-gpu=compute_cap --format=csv | tail -n 1)
-    export TORCH_CUDA_ARCH_LIST
-  fi
   pip_build_and_install "git+https://github.com/pytorch/audio.git@${commit}" dist/audio
 }

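A minimal sketch of how the new helper might be exercised locally (the script path and the environment values here are assumptions for illustration, not taken from this commit):

  # BUILD_ENVIRONMENT and TEST_CONFIG are normally set by the CI job.
  export BUILD_ENVIRONMENT="linux-jammy-cuda12.8-py3.10-gcc11"
  export TEST_CONFIG="nogpu_AVX512"
  source .ci/pytorch/common_utils.sh   # assumed location of detect_cuda_arch
  detect_cuda_arch
  echo "${TORCH_CUDA_ARCH_LIST}"       # 8.0 on a machine without nvidia-smi

The net effect of the refactor is that arch detection happens once, up front in the test script, instead of being duplicated inside install_torchaudio and the vLLM test branch.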

@@ -91,6 +91,7 @@ if [[ "$BUILD_ENVIRONMENT" == *clang9* || "$BUILD_ENVIRONMENT" == *xpu* ]]; then
   export VALGRIND=OFF
 fi
+detect_cuda_arch
 if [[ "$BUILD_ENVIRONMENT" == *s390x* ]]; then
   # There are additional warnings on s390x, maybe due to newer gcc.
@@ -1630,11 +1631,7 @@ elif [[ "${TEST_CONFIG}" == *xla* ]]; then
   build_xla
   test_xla
 elif [[ "$TEST_CONFIG" == *vllm* ]]; then
-  if [[ "${BUILD_ENVIRONMENT}" == *cuda* ]]; then
-    TORCH_CUDA_ARCH_LIST=$(nvidia-smi --query-gpu=compute_cap --format=csv | tail -n 1)
-    export TORCH_CUDA_ARCH_LIST
-  fi
-  echo "VLLM CI TORCH_CUDA_ARCH_LIST: $TORCH_CUDA_ARCH_LIST"
+  echo "vLLM CI uses TORCH_CUDA_ARCH_LIST: $TORCH_CUDA_ARCH_LIST"
   (cd .ci/lumen_cli && python -m pip install -e .)
   python -m cli.run test external vllm --test-plan "$TEST_CONFIG" --shard-id "$SHARD_NUMBER" --num-shards "$NUM_TEST_SHARDS"
 elif [[ "${TEST_CONFIG}" == *executorch* ]]; then