Compare commits

...

1 Commit

SHA1: 0722679587
Message: chore: Update CI configuration for workflows
Signed-off-by: Adrien <adrien@huggingface.co>
Date: 2024-07-31 15:27:50 +02:00
3 changed files with 23 additions and 18 deletions

Changed file 1 of 3

@@ -20,7 +20,8 @@ jobs:
fail-fast: false
matrix:
docker-image-name: ["huggingface/peft-gpu-bnb-source:latest", "huggingface/peft-gpu-bnb-latest:latest", "huggingface/peft-gpu-bnb-multi-source:latest"]
-runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
+runs-on:
+  group: aws-g6-4xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0"
TEST_TYPE: "single_gpu_${{ matrix.docker-image-name }}"
@@ -45,7 +46,7 @@ jobs:
echo "Checking out tag for Transformers version: v$transformers_version"
git fetch --tags
git checkout tags/v$transformers_version
-cd ..
+cd ..
fi
- name: Test bnb import
@@ -129,7 +130,7 @@ jobs:
title: 🤗 Results of bitsandbytes transformers tests - single GPU
status: ${{ steps.transformers_tests.outcome }}
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
- name: Generate Report
if: always()
run: |
@@ -142,7 +143,8 @@ jobs:
fail-fast: false
matrix:
docker-image-name: ["huggingface/peft-gpu-bnb-source:latest", "huggingface/peft-gpu-bnb-latest:latest", "huggingface/peft-gpu-bnb-multi-source:latest"]
-runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
+runs-on:
+  group: aws-g6-12xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0,1"
TEST_TYPE: "multi_gpu_${{ matrix.docker-image-name }}"
@@ -168,7 +170,7 @@ jobs:
git fetch --tags
git checkout tags/v$transformers_version
cd ..
-fi
+fi
- name: Test bnb import
id: import
@@ -191,7 +193,7 @@ jobs:
if: always()
run: |
source activate peft
- name: Run examples on multi GPU
id: examples_tests
if: always()
@@ -207,7 +209,7 @@ jobs:
title: 🤗 Results of bitsandbytes examples tests - multi GPU
status: ${{ steps.examples_tests.outcome }}
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
- name: Run core tests on multi GPU
id: core_tests
if: always()
@@ -239,7 +241,7 @@ jobs:
title: 🤗 Results of bitsandbytes transformers tests - multi GPU
status: ${{ steps.transformers_tests.outcome }}
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
- name: Generate Report
if: always()
run: |
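Taken together, the hunks above replace label-based runner selection with a runner group in both the single-GPU and multi-GPU bitsandbytes jobs: instead of matching the labels of a self-hosted machine, each job is now routed to a named GitHub Actions runner group. Written out as plain workflow YAML rather than a diff, the updated single-GPU job header looks roughly like this. This is a sketch only: the job name is a placeholder, the indentation is reconstructed, and the steps are omitted; the strategy, runs-on, and env values are the ones shown in the diff.

jobs:
  bnb_tests_single_gpu:   # placeholder name; the real job name is not visible in this diff
    strategy:
      fail-fast: false
      matrix:
        docker-image-name: ["huggingface/peft-gpu-bnb-source:latest", "huggingface/peft-gpu-bnb-latest:latest", "huggingface/peft-gpu-bnb-multi-source:latest"]
    # Old selector, removed by this commit:
    # runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
    # New selector: route the job to a runner group
    runs-on:
      group: aws-g6-4xlarge-plus
    env:
      CUDA_VISIBLE_DEVICES: "0"
      TEST_TYPE: "single_gpu_${{ matrix.docker-image-name }}"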

Changed file 2 of 3

@@ -17,7 +17,8 @@ jobs:
run_all_tests_single_gpu:
strategy:
fail-fast: false
-runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
+runs-on:
+  group: aws-g6-4xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0"
TEST_TYPE: "single_gpu"
@@ -34,7 +35,7 @@ jobs:
source activate peft
pip install -e . --no-deps
pip install pytest-reportlog
- name: Run common tests on single GPU
run: |
source activate peft
@@ -44,7 +45,7 @@ jobs:
run: |
source activate peft
make tests_examples_single_gpu
- name: Run core tests on single GPU
run: |
source activate peft
@@ -54,7 +55,7 @@ jobs:
run: |
source activate peft
make tests_regression
- name: Generate Report
if: always()
run: |
@@ -64,7 +65,8 @@ jobs:
run_all_tests_multi_gpu:
strategy:
fail-fast: false
-runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
+runs-on:
+  group: aws-g6-12xlarge-plus
env:
CUDA_VISIBLE_DEVICES: "0,1"
TEST_TYPE: "multi_gpu"
@@ -85,22 +87,22 @@ jobs:
- name: Run core GPU tests on multi-gpu
run: |
source activate peft
- name: Run common tests on multi GPU
run: |
source activate peft
make tests_common_gpu
- name: Run examples on multi GPU
run: |
source activate peft
make tests_examples_multi_gpu
- name: Run core tests on multi GPU
run: |
source activate peft
make tests_core_multi_gpu
- name: Generate Report
if: always()
run: |
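The nightly workflow in this file gets the same substitution for its two jobs. As a rough before/after sketch for the run_all_tests_multi_gpu job (indentation reconstructed; only the lines that appear in the hunks above are taken from the diff):

run_all_tests_multi_gpu:
  strategy:
    fail-fast: false
  # Before: matched against the labels of a self-hosted T4 machine
  # runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
  # After: routed to a runner group; two GPUs are exposed to the test run
  runs-on:
    group: aws-g6-12xlarge-plus
  env:
    CUDA_VISIBLE_DEVICES: "0,1"
    TEST_TYPE: "multi_gpu"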

Changed file 3 of 3

@@ -19,7 +19,8 @@ env:
jobs:
run_tests_with_compile:
-runs-on: [self-hosted, single-gpu, nvidia-gpu, a10, ci]
+runs-on:
+  group: aws-g6-4xlarge-plus
env:
PEFT_DEBUG_WITH_TORCH_COMPILE: 1
CUDA_VISIBLE_DEVICES: "0"
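This third workflow is the simplest of the three changes: a single job, no matrix, moved from a label-matched self-hosted A10 runner to the same runner group as the single-GPU jobs above. A minimal sketch of its updated job header, assuming standard two-space indentation (all values are the ones shown in the hunk above):

jobs:
  run_tests_with_compile:
    # Previously: runs-on: [self-hosted, single-gpu, nvidia-gpu, a10, ci]
    runs-on:
      group: aws-g6-4xlarge-plus
    env:
      # Debug switch that, per its name, runs the PEFT tests under torch.compile
      PEFT_DEBUG_WITH_TORCH_COMPILE: 1
      CUDA_VISIBLE_DEVICES: "0"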