mirror of
https://github.com/huggingface/peft.git
synced 2025-10-21 07:53:47 +08:00
Compare commits
173 Commits
Author | SHA1 | Date | |
---|---|---|---|
b8da272660 | |||
61c57f4f65 | |||
f0b066eae8 | |||
8f39708650 | |||
f4cf170a9c | |||
b67c9b64fd | |||
5efeba1856 | |||
af275d2d42 | |||
9bc670eafb | |||
5d944589d2 | |||
152ed70b00 | |||
f5dd2acfed | |||
3b2ebf1ba1 | |||
adf0a1dc96 | |||
18f3efe5c0 | |||
4a8dedb2a7 | |||
25202271bc | |||
214f891cd2 | |||
7868d0372b | |||
734ea9a014 | |||
54be5a3db6 | |||
b180ae46f8 | |||
31fbbd2203 | |||
c9f7240afc | |||
95b39642fb | |||
37b9c5c74b | |||
01275b4cb3 | |||
679bcd8777 | |||
850eeb5c3a | |||
5996d39408 | |||
900f96c40d | |||
c3b63ce2c4 | |||
1a5d0f8151 | |||
f3c7c6e5c1 | |||
8fcb1951a5 | |||
fa218e1942 | |||
6c832c1dd4 | |||
95821e5ce4 | |||
25ab6c9bb2 | |||
b4cf1b3c46 | |||
eb5eb6efb5 | |||
f71e89f771 | |||
e8ba7de573 | |||
0222450f44 | |||
4c3a76fa68 | |||
670d0fac31 | |||
22f042a107 | |||
d6e772f192 | |||
042123465c | |||
41c274ecac | |||
9988cb9d00 | |||
fcac30bef5 | |||
2a5d3132e9 | |||
c869664891 | |||
4611034ff8 | |||
b9260305e3 | |||
f51428313f | |||
9a087823c6 | |||
46f78978f1 | |||
269aba5303 | |||
52a4ac9c2f | |||
c874ba3f1b | |||
f13d860e9f | |||
f6d3e38601 | |||
7e7b55880e | |||
1b16753a6a | |||
27833a2e60 | |||
273acf059e | |||
296fbcde3e | |||
f2b6d13f1d | |||
8aacb993e7 | |||
e6cd24c907 | |||
05f57e94ef | |||
2ce83e05c1 | |||
ebcd0792b8 | |||
ba75bb14d1 | |||
6472061a76 | |||
e02b938e02 | |||
5268495213 | |||
2aaf9cedbb | |||
a019f8690d | |||
2a6402f4b2 | |||
e72a96f7cf | |||
48e136d9bd | |||
58afb34ea0 | |||
01f1b992eb | |||
09358aad30 | |||
31c0d85755 | |||
018a1f49c4 | |||
1e2258d7f7 | |||
1e5227ff90 | |||
62122b5add | |||
9dc53b8fd5 | |||
db8b76fdb5 | |||
7ffa43b16e | |||
27bc3054a3 | |||
184beaf1d6 | |||
c9b19bb8f3 | |||
ef23712b13 | |||
d716adf31c | |||
d37dde61e1 | |||
5364351446 | |||
717db6e1c2 | |||
5194aef509 | |||
25c0fe9a55 | |||
e0e8204bc3 | |||
076561bbd3 | |||
efda766f51 | |||
d608f8329a | |||
19461353aa | |||
3831e06ab5 | |||
2f5360a7da | |||
8843a767da | |||
b6af7feb34 | |||
47b3d7422a | |||
7b1c08d2b5 | |||
a8286a7bff | |||
683db0fa2c | |||
0f89d34d82 | |||
0b40d1a304 | |||
03798a9143 | |||
d33c1f118e | |||
63a536b18e | |||
ad8f7cb59e | |||
3538e8ac7d | |||
b213ea5fb9 | |||
7ed94f3269 | |||
a0788a3f92 | |||
cb0bf07774 | |||
8cd2cb613b | |||
e7b75070c7 | |||
1b262167f3 | |||
39c60ffca9 | |||
8304017a9a | |||
b2922565c4 | |||
3cf5359f11 | |||
cb7aedd9ba | |||
47745d57c2 | |||
1fec23152a | |||
bc6a99906c | |||
691bc22ea6 | |||
fb7f2796e5 | |||
4e32679f37 | |||
3f7aacd601 | |||
e3eeabfad2 | |||
ae1ae20b76 | |||
2535036c24 | |||
e003ae7850 | |||
0649947396 | |||
b5acf5d6be | |||
748f7968f3 | |||
47b3712898 | |||
2558dd872d | |||
6f41990da4 | |||
d8fec400c7 | |||
32f3878870 | |||
cb08d095a5 | |||
86d086ec37 | |||
02ae6bcb37 | |||
77b7238b90 | |||
3edcebf713 | |||
e0cb15e2ee | |||
3ec55f4ac4 | |||
608a90ded9 | |||
e19f7bf424 | |||
250b7eb85f | |||
f5f7b67d60 | |||
7a22b7daf0 | |||
e7b47ac01d | |||
8bc3c0861d | |||
383e1fab0e | |||
d0fa70aeb6 | |||
b1d6c77108 |
2
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
2
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@ -23,7 +23,7 @@ body:
|
||||
|
||||
Please tag fewer than 3 people.
|
||||
|
||||
Library: @pacman100 @younesbelkada @benjaminbossan @sayakpaul
|
||||
Library: @benjaminbossan @sayakpaul
|
||||
|
||||
Documentation: @stevhliu
|
||||
|
||||
|
219
.github/workflows/build_docker_images.yml
vendored
219
.github/workflows/build_docker_images.yml
vendored
@ -16,18 +16,9 @@ env:
|
||||
jobs:
|
||||
latest-cpu:
|
||||
name: "Latest Peft CPU [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
@ -45,45 +36,20 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/peft-cpu
|
||||
|
||||
- name: Post to a Slack channel
|
||||
id: slack
|
||||
#uses: slackapi/slack-github-action@v1.25.0
|
||||
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
# Slack channel id, channel name, or user id to post message.
|
||||
# See also: https://api.slack.com/methods/chat.postMessage#channels
|
||||
channel-id: ${{ env.CI_SLACK_CHANNEL }}
|
||||
# For posting a rich message using Block Kit
|
||||
payload: |
|
||||
{
|
||||
"text": "peft-cpu Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}",
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": "peft-cpu Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
env:
|
||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
slack_channel: ${{ env.CI_SLACK_CHANNEL }}
|
||||
title: 🤗 Results of the PEFT-CPU docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-cuda:
|
||||
name: "Latest Peft GPU [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
@ -101,45 +67,20 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/peft-gpu
|
||||
|
||||
- name: Post to a Slack channel
|
||||
id: slack
|
||||
#uses: slackapi/slack-github-action@v1.25.0
|
||||
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
# Slack channel id, channel name, or user id to post message.
|
||||
# See also: https://api.slack.com/methods/chat.postMessage#channels
|
||||
channel-id: ${{ env.CI_SLACK_CHANNEL }}
|
||||
# For posting a rich message using Block Kit
|
||||
payload: |
|
||||
{
|
||||
"text": "peft-gpu Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}",
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": "peft-gpu Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
env:
|
||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
slack_channel: ${{ env.CI_SLACK_CHANNEL }}
|
||||
title: 🤗 Results of the PEFT-GPU docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-cuda-bnb-source:
|
||||
name: "Latest Peft GPU + bnb source [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
@ -157,47 +98,20 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/peft-gpu-bnb-source
|
||||
|
||||
|
||||
- name: Post to a Slack channel
|
||||
id: slack
|
||||
#uses: slackapi/slack-github-action@v1.25.0
|
||||
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
# Slack channel id, channel name, or user id to post message.
|
||||
# See also: https://api.slack.com/methods/chat.postMessage#channels
|
||||
channel-id: ${{ env.CI_SLACK_CHANNEL }}
|
||||
# For posting a rich message using Block Kit
|
||||
payload: |
|
||||
{
|
||||
"text": "peft-gpu + bnb-source (source) Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}",
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": "peft-gpu + bnb-source (source) Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
env:
|
||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
slack_channel: ${{ env.CI_SLACK_CHANNEL }}
|
||||
title: 🤗 Results of the PEFT-GPU (bnb source / HF latest) docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-cuda-bnb-source-latest:
|
||||
name: "Latest Peft GPU + bnb source [accelerate / peft / transformers latest]"
|
||||
runs-on: ubuntu-latest
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
@ -215,45 +129,20 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/peft-gpu-bnb-latest
|
||||
|
||||
- name: Post to a Slack channel
|
||||
id: slack
|
||||
#uses: slackapi/slack-github-action@v1.25.0
|
||||
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
# Slack channel id, channel name, or user id to post message.
|
||||
# See also: https://api.slack.com/methods/chat.postMessage#channels
|
||||
channel-id: ${{ env.CI_SLACK_CHANNEL }}
|
||||
# For posting a rich message using Block Kit
|
||||
payload: |
|
||||
{
|
||||
"text": "peft-gpu + bnb-source (latest) Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}",
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": "peft-gpu + bnb-source (latest) Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
env:
|
||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
slack_channel: ${{ env.CI_SLACK_CHANNEL }}
|
||||
title: 🤗 Results of the PEFT-GPU (bnb source / HF source) docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-cuda-bnb-source-multi:
|
||||
name: "Latest Peft GPU + bnb (multi-backend) source [accelerate / peft / transformers source]"
|
||||
runs-on: ubuntu-latest
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
@ -271,27 +160,13 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/peft-gpu-bnb-multi-source
|
||||
|
||||
- name: Post to a Slack channel
|
||||
id: slack
|
||||
#uses: slackapi/slack-github-action@v1.25.0
|
||||
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
# Slack channel id, channel name, or user id to post message.
|
||||
# See also: https://api.slack.com/methods/chat.postMessage#channels
|
||||
channel-id: ${{ env.CI_SLACK_CHANNEL }}
|
||||
# For posting a rich message using Block Kit
|
||||
payload: |
|
||||
{
|
||||
"text": "peft-gpu + bnb-source (latest) Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}",
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": "peft-gpu + bnb-source (latest) Docker Image build result: ${{ job.status }}\n${{ github.event.pull_request.html_url || github.event.head_commit.url }}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
env:
|
||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
slack_channel: ${{ env.CI_SLACK_CHANNEL }}
|
||||
title: 🤗 Results of the PEFT-GPU (bnb source multi-backend / HF latest) docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
|
||||
|
3
.github/workflows/build_documentation.yml
vendored
3
.github/workflows/build_documentation.yml
vendored
@ -14,6 +14,7 @@ jobs:
|
||||
commit_sha: ${{ github.sha }}
|
||||
package: peft
|
||||
notebook_folder: peft_docs
|
||||
custom_container: huggingface/transformers-doc-builder
|
||||
secrets:
|
||||
token: ${{ secrets.HUGGINGFACE_PUSH }}
|
||||
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
|
||||
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
|
||||
|
1
.github/workflows/build_pr_documentation.yml
vendored
1
.github/workflows/build_pr_documentation.yml
vendored
@ -14,3 +14,4 @@ jobs:
|
||||
commit_sha: ${{ github.event.pull_request.head.sha }}
|
||||
pr_number: ${{ github.event.number }}
|
||||
package: peft
|
||||
custom_container: huggingface/transformers-doc-builder
|
||||
|
134
.github/workflows/nightly-bnb.yml
vendored
134
.github/workflows/nightly-bnb.yml
vendored
@ -15,11 +15,13 @@ env:
|
||||
|
||||
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
docker-image-name: ["huggingface/peft-gpu-bnb-source:latest", "huggingface/peft-gpu-bnb-latest:latest", "huggingface/peft-gpu-bnb-multi-source:latest"]
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
runs-on:
|
||||
group: aws-g6-4xlarge-plus
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu_${{ matrix.docker-image-name }}"
|
||||
@ -44,26 +46,91 @@ jobs:
|
||||
echo "Checking out tag for Transformers version: v$transformers_version"
|
||||
git fetch --tags
|
||||
git checkout tags/v$transformers_version
|
||||
cd ..
|
||||
cd ..
|
||||
fi
|
||||
|
||||
- name: Test bnb import
|
||||
id: import
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
python3 -m bitsandbytes
|
||||
python3 -c "import bitsandbytes as bnb"
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.BNB_SLACK_CHANNEL_ID }}
|
||||
title: 🤗 Results of bitsandbytes import
|
||||
status: ${{ steps.import.outcome }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
- name: Run examples on single GPU
|
||||
id: examples_tests
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_examples_single_gpu_bnb
|
||||
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.BNB_SLACK_CHANNEL_ID }}
|
||||
title: 🤗 Results of bitsandbytes examples tests - single GPU
|
||||
status: ${{ steps.examples_tests.outcome }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
- name: Run core tests on single GPU
|
||||
id: core_tests
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_core_single_gpu_bnb
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.BNB_SLACK_CHANNEL_ID }}
|
||||
title: 🤗 Results of bitsandbytes core tests - single GPU
|
||||
status: ${{ steps.core_tests.outcome }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
# TODO: this is a test to see if BNB multi-backend single-GPU tests succeed w/o regression tests
|
||||
# - name: Run BNB regression tests on single GPU
|
||||
# id: regression_tests
|
||||
# if: always()
|
||||
# run: |
|
||||
# source activate peft
|
||||
# make tests_gpu_bnb_regression
|
||||
|
||||
# - name: Post to Slack
|
||||
# if: always()
|
||||
# uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
# with:
|
||||
# slack_channel: ${{ secrets.BNB_SLACK_CHANNEL_ID }}
|
||||
# title: 🤗 Results of bitsandbytes regression tests - single GPU
|
||||
# status: ${{ steps.regression_tests.outcome }}
|
||||
# slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
- name: Run transformers tests on single GPU
|
||||
id: transformers_tests
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make transformers_tests
|
||||
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.BNB_SLACK_CHANNEL_ID }}
|
||||
title: 🤗 Results of bitsandbytes transformers tests - single GPU
|
||||
status: ${{ steps.transformers_tests.outcome }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
@ -71,11 +138,13 @@ jobs:
|
||||
python scripts/log_reports.py --slack_channel_name bnb-daily-ci-collab >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_all_tests_multi_gpu:
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
docker-image-name: ["huggingface/peft-gpu-bnb-source:latest", "huggingface/peft-gpu-bnb-latest:latest", "huggingface/peft-gpu-bnb-multi-source:latest"]
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
runs-on:
|
||||
group: aws-g6-12xlarge-plus
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0,1"
|
||||
TEST_TYPE: "multi_gpu_${{ matrix.docker-image-name }}"
|
||||
@ -101,31 +170,78 @@ jobs:
|
||||
git fetch --tags
|
||||
git checkout tags/v$transformers_version
|
||||
cd ..
|
||||
fi
|
||||
fi
|
||||
|
||||
- name: Test bnb import
|
||||
id: import
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
python3 -m bitsandbytes
|
||||
python3 -c "import bitsandbytes as bnb"
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.BNB_SLACK_CHANNEL_ID }}
|
||||
title: 🤗 Results of bitsandbytes import
|
||||
status: ${{ steps.import.outcome }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
- name: Run core GPU tests on multi-gpu
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
|
||||
|
||||
- name: Run examples on multi GPU
|
||||
id: examples_tests
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_examples_multi_gpu_bnb
|
||||
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.BNB_SLACK_CHANNEL_ID }}
|
||||
title: 🤗 Results of bitsandbytes examples tests - multi GPU
|
||||
status: ${{ steps.examples_tests.outcome }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
- name: Run core tests on multi GPU
|
||||
id: core_tests
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_core_multi_gpu_bnb
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.BNB_SLACK_CHANNEL_ID }}
|
||||
title: 🤗 Results of bitsandbytes core tests - multi GPU
|
||||
status: ${{ steps.core_tests.outcome }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
- name: Run transformers tests on multi GPU
|
||||
id: transformers_tests
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make transformers_tests
|
||||
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.BNB_SLACK_CHANNEL_ID }}
|
||||
title: 🤗 Results of bitsandbytes transformers tests - multi GPU
|
||||
status: ${{ steps.transformers_tests.outcome }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
|
20
.github/workflows/nightly.yml
vendored
20
.github/workflows/nightly.yml
vendored
@ -17,7 +17,8 @@ jobs:
|
||||
run_all_tests_single_gpu:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
runs-on:
|
||||
group: aws-g6-4xlarge-plus
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu"
|
||||
@ -34,7 +35,7 @@ jobs:
|
||||
source activate peft
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog
|
||||
|
||||
|
||||
- name: Run common tests on single GPU
|
||||
run: |
|
||||
source activate peft
|
||||
@ -44,7 +45,7 @@ jobs:
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_examples_single_gpu
|
||||
|
||||
|
||||
- name: Run core tests on single GPU
|
||||
run: |
|
||||
source activate peft
|
||||
@ -54,7 +55,7 @@ jobs:
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_regression
|
||||
|
||||
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
@ -64,7 +65,8 @@ jobs:
|
||||
run_all_tests_multi_gpu:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
runs-on:
|
||||
group: aws-g6-12xlarge-plus
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0,1"
|
||||
TEST_TYPE: "multi_gpu"
|
||||
@ -85,22 +87,22 @@ jobs:
|
||||
- name: Run core GPU tests on multi-gpu
|
||||
run: |
|
||||
source activate peft
|
||||
|
||||
|
||||
- name: Run common tests on multi GPU
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_common_gpu
|
||||
|
||||
|
||||
- name: Run examples on multi GPU
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_examples_multi_gpu
|
||||
|
||||
|
||||
- name: Run core tests on multi GPU
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_core_multi_gpu
|
||||
|
||||
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
|
5
.github/workflows/stale.yml
vendored
5
.github/workflows/stale.yml
vendored
@ -9,6 +9,9 @@ jobs:
|
||||
name: Close Stale Issues
|
||||
if: github.repository == 'huggingface/peft'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
@ -24,4 +27,4 @@ jobs:
|
||||
pip install PyGithub
|
||||
- name: Close stale issues
|
||||
run: |
|
||||
python scripts/stale.py
|
||||
python scripts/stale.py
|
||||
|
8
.github/workflows/tests-main.yml
vendored
8
.github/workflows/tests-main.yml
vendored
@ -26,3 +26,11 @@ jobs:
|
||||
- name: Test with pytest
|
||||
run: |
|
||||
make test
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.SLACK_CHANNEL_ID }}
|
||||
title: 🤗 Results of transformers main tests
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
8
.github/workflows/tests.yml
vendored
8
.github/workflows/tests.yml
vendored
@ -31,6 +31,8 @@ jobs:
|
||||
tests:
|
||||
needs: check_code_quality
|
||||
strategy:
|
||||
# TODO: remove 'fail-fast' line once timeout issue from the Hub is solved
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.8", "3.9", "3.10", "3.11"]
|
||||
os: ["ubuntu-latest", "macos-12", "windows-latest"]
|
||||
@ -48,6 +50,12 @@ jobs:
|
||||
python -m pip install --upgrade pip
|
||||
# cpu version of pytorch
|
||||
pip install -e .[test]
|
||||
- name: Downgrade numpy on MacOS and Windows
|
||||
# TODO: remove numpy downgrade on MacOS & Windows once torch fixes numpy 2.0 issue
|
||||
shell: bash
|
||||
if: matrix.os == 'windows-latest' || matrix.os == 'macos-12'
|
||||
run: |
|
||||
pip install --force-reinstall -U "numpy<2.0.0"
|
||||
- name: Test with pytest
|
||||
run: |
|
||||
make test
|
||||
|
37
.github/workflows/torch_compile_tests.yml
vendored
37
.github/workflows/torch_compile_tests.yml
vendored
@ -1,7 +1,5 @@
|
||||
name: torch compile tests
|
||||
|
||||
# see peft/tests/__init__.py
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
@ -13,31 +11,42 @@ on:
|
||||
required: false
|
||||
default: false
|
||||
|
||||
env:
|
||||
RUN_SLOW: "yes"
|
||||
IS_GITHUB_CI: "1"
|
||||
# To be able to run tests on CUDA 12.2
|
||||
NVIDIA_DISABLE_REQUIRE: "1"
|
||||
|
||||
jobs:
|
||||
run_tests_with_compile:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on:
|
||||
group: aws-g6-4xlarge-plus
|
||||
env:
|
||||
PEFT_DEBUG_WITH_TORCH_COMPILE: 1
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu_huggingface/peft-gpu-bnb-latest:latest"
|
||||
container:
|
||||
image: "huggingface/peft-gpu-bnb-latest:latest"
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.inputs.branch }}
|
||||
repository: ${{ github.event.pull_request.head.repo.full_name }}
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.10"
|
||||
cache: "pip"
|
||||
cache-dependency-path: "setup.py"
|
||||
- name: Install dependencies
|
||||
- name: Pip install
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install .[test]
|
||||
source activate peft
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-cov pytest-reportlog parameterized datasets scipy einops
|
||||
pip install "pytest>=7.2.0,<8.0.0" # see: https://github.com/huggingface/transformers/blob/ce4fff0be7f6464d713f7ac3e0bbaafbc6959ae5/setup.py#L148C6-L148C26
|
||||
if [ "${{ github.event.inputs.pytorch_nightly }}" = "true" ]; then
|
||||
python -m pip install --upgrade --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
|
||||
fi
|
||||
- name: Test compile with pytest
|
||||
run: |
|
||||
source activate peft
|
||||
echo "PEFT_DEBUG_WITH_TORCH_COMPILE=$PEFT_DEBUG_WITH_TORCH_COMPILE"
|
||||
git status
|
||||
make test
|
||||
make tests_torch_compile
|
||||
|
15
.github/workflows/trufflehog.yml
vendored
Normal file
15
.github/workflows/trufflehog.yml
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
on:
|
||||
push:
|
||||
|
||||
name: Secret Leaks
|
||||
|
||||
jobs:
|
||||
trufflehog:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Secret Scanning
|
||||
uses: trufflesecurity/trufflehog@main
|
@ -1,13 +1,13 @@
|
||||
repos:
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
rev: v0.2.1
|
||||
rev: v0.6.1
|
||||
hooks:
|
||||
- id: ruff
|
||||
args:
|
||||
- --fix
|
||||
- id: ruff-format
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v4.5.0
|
||||
rev: v4.6.0
|
||||
hooks:
|
||||
- id: check-merge-conflict
|
||||
- id: check-yaml
|
||||
|
10
Makefile
10
Makefile
@ -6,13 +6,13 @@ check_dirs := src tests examples docs scripts docker
|
||||
|
||||
# this target runs checks on all files
|
||||
quality:
|
||||
ruff $(check_dirs)
|
||||
ruff check $(check_dirs)
|
||||
ruff format --check $(check_dirs)
|
||||
doc-builder style src/peft tests docs/source --max_len 119 --check_only
|
||||
|
||||
# Format source code automatically and check is there are any problems left that need manual fixing
|
||||
style:
|
||||
ruff $(check_dirs) --fix
|
||||
ruff check --fix $(check_dirs)
|
||||
ruff format $(check_dirs)
|
||||
doc-builder style src/peft tests docs/source --max_len 119
|
||||
|
||||
@ -47,9 +47,15 @@ tests_core_multi_gpu_bnb:
|
||||
tests_core_single_gpu_bnb:
|
||||
python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",)
|
||||
|
||||
tests_gpu_bnb_regression:
|
||||
python -m pytest tests/bnb/test_bnb_regression.py $(if $(IS_GITHUB_CI),--report-log "bnb_regression_gpu.log",)
|
||||
|
||||
# For testing transformers tests for bnb runners
|
||||
transformers_tests:
|
||||
RUN_SLOW=1 python -m pytest transformers-clone/tests/quantization/bnb $(if $(IS_GITHUB_CI),--report-log "transformers_tests.log",)
|
||||
|
||||
tests_regression:
|
||||
python -m pytest -s --regression tests/regression/ $(if $(IS_GITHUB_CI),--report-log "regression_tests.log",)
|
||||
|
||||
tests_torch_compile:
|
||||
python -m pytest tests/test_torch_compile.py $(if $(IS_GITHUB_CI),--report-log "compile_tests.log",)
|
||||
|
@ -42,9 +42,9 @@ RUN source activate peft && \
|
||||
|
||||
# Add autoawq for quantization testing
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.1/autoawq-0.2.1-cp38-cp38-linux_x86_64.whl
|
||||
python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.4/autoawq-0.2.4-cp38-cp38-linux_x86_64.whl
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ_kernels/releases/download/v0.0.4/autoawq_kernels-0.0.4-cp38-cp38-linux_x86_64.whl
|
||||
python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ_kernels/releases/download/v0.0.6/autoawq_kernels-0.0.6-cp38-cp38-linux_x86_64.whl
|
||||
|
||||
# Install apt libs
|
||||
RUN apt-get update && \
|
||||
@ -52,6 +52,10 @@ RUN apt-get update && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists*
|
||||
|
||||
# Add eetq for quantization testing
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install git+https://github.com/NetEase-FuXi/EETQ.git
|
||||
|
||||
# Activate the conda env and install transformers + accelerate from source
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install -U --no-cache-dir \
|
||||
@ -66,6 +70,10 @@ RUN source activate peft && \
|
||||
RUN source activate peft && \
|
||||
pip install aqlm[gpu]>=1.0.2
|
||||
|
||||
# Add HQQ for quantization testing
|
||||
RUN source activate peft && \
|
||||
pip install hqq
|
||||
|
||||
RUN source activate peft && \
|
||||
pip freeze | grep transformers
|
||||
|
||||
|
@ -37,10 +37,14 @@
|
||||
title: Adapter injection
|
||||
- local: developer_guides/mixed_models
|
||||
title: Mixed adapter types
|
||||
- local: developer_guides/torch_compile
|
||||
title: torch.compile
|
||||
- local: developer_guides/contributing
|
||||
title: Contribute to PEFT
|
||||
- local: developer_guides/troubleshooting
|
||||
title: Troubleshooting
|
||||
- local: developer_guides/checkpoint
|
||||
title: PEFT checkpoint format
|
||||
|
||||
- title: 🤗 Accelerate integrations
|
||||
sections:
|
||||
@ -86,6 +90,8 @@
|
||||
title: LoKr
|
||||
- local: package_reference/lora
|
||||
title: LoRA
|
||||
- local: package_reference/xlora
|
||||
title: X-LoRA
|
||||
- local: package_reference/adapter_utils
|
||||
title: LyCORIS
|
||||
- local: package_reference/multitask_prompt_tuning
|
||||
@ -102,12 +108,20 @@
|
||||
title: Prefix tuning
|
||||
- local: package_reference/prompt_tuning
|
||||
title: Prompt tuning
|
||||
- local: package_reference/layernorm_tuning
|
||||
title: Layernorm tuning
|
||||
- local: package_reference/vera
|
||||
title: VeRA
|
||||
- local: package_reference/fourierft
|
||||
title: FourierFT
|
||||
- local: package_reference/vblora
|
||||
title: VB-LoRA
|
||||
|
||||
title: Adapters
|
||||
- sections:
|
||||
- local: package_reference/merge_utils
|
||||
title: Model merge
|
||||
- local: package_reference/helpers
|
||||
title: Helpers
|
||||
title: Utilities
|
||||
title: API reference
|
||||
|
||||
|
@ -94,7 +94,7 @@ accelerate launch --config_file "configs/deepspeed_config.yaml" train.py \
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
@ -217,7 +217,7 @@ accelerate launch --config_file "configs/deepspeed_config_z3_qlora.yaml" train.
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
|
@ -74,7 +74,7 @@ accelerate launch --config_file "configs/fsdp_config.yaml" train.py \
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
@ -218,7 +218,7 @@ accelerate launch --config_file "configs/fsdp_config_qlora.yaml" train.py \
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
@ -249,7 +249,7 @@ accelerate launch --config_file "configs/fsdp_config_qlora.yaml" train.py \
|
||||
--bnb_4bit_quant_storage_dtype "bfloat16"
|
||||
```
|
||||
|
||||
Notice the new argument being passed, `bnb_4bit_quant_storage_dtype`, which denotes the data type for packing the 4-bit parameters. For example, when it is set to `bfloat16`, **32/4 = 8** 4-bit params are packed together post quantization. When using mixed precision training with `bfloat16`, `bnb_4bit_quant_storage_dtype` can be either `bfloat16` for pure `bfloat16` finetuning, or `float32` for automatic mixed precision (this consumes more GPU memory). When using mixed precision training with `float16`, `bnb_4bit_quant_storage_dtype` should be set to `float32` for stable automatic mixed precision training.
|
||||
Notice the new argument being passed, `bnb_4bit_quant_storage_dtype`, which denotes the data type for packing the 4-bit parameters. For example, when it is set to `bfloat16`, **16/4 = 4** 4-bit params are packed together post quantization. When using mixed precision training with `bfloat16`, `bnb_4bit_quant_storage_dtype` can be either `bfloat16` for pure `bfloat16` finetuning, or `float32` for automatic mixed precision (this consumes more GPU memory). When using mixed precision training with `float16`, `bnb_4bit_quant_storage_dtype` should be set to `float32` for stable automatic mixed precision training.
|
||||
|
||||
In terms of training code, the important code changes are:
|
||||
|
||||
@ -288,4 +288,5 @@ You can also refer the [llama-recipes](https://github.com/facebookresearch/llama
|
||||
1. Merging when using PEFT and FSDP is currently unsupported and will raise an error.
|
||||
2. Passing the `modules_to_save` config parameter is untested at present.
|
||||
3. GPU Memory saving when using CPU Offloading is untested at present.
|
||||
4. When using FSDP+QLoRA, `paged_adamw_8bit` currently results in an error when saving a checkpoint.
|
||||
4. When using FSDP+QLoRA, `paged_adamw_8bit` currently results in an error when saving a checkpoint.
|
||||
5. DoRA training with FSDP should work (albeit at lower speed than LoRA). If combined with bitsandbytes (QDoRA), 4-bit quantization should also work, but 8-bit quantization has known issues and is not recommended.
|
||||
|
@ -50,6 +50,18 @@ In principle, LoRA can be applied to any subset of weight matrices in a neural n
|
||||
</div>
|
||||
<small><a href="https://hf.co/papers/2309.14859">Navigating Text-To-Image Customization: From LyCORIS Fine-Tuning to Model Evaluation</a></small>
|
||||
|
||||
## Mixture of LoRA Experts (X-LoRA)
|
||||
|
||||
[X-LoRA](https://arxiv.org/abs/2402.07148) is a mixture of experts method for LoRA which works by using dense or sparse gating to dynamically activate LoRA experts. The LoRA experts as well as the base model are frozen during training, resulting in a low parameter count as only the gating layers must be trained. In particular, the gating layers output scalings which (depending on config) are granular on the layer and token level. Additionally, during inference, X-LoRA dynamically activates LoRA adapters to recall knowledge and effectively mix them:
|
||||
|
||||
The below graphic demonstrates how the scalings change for different prompts for each token. This highlights the activation of different adapters as the generation progresses and the sequence creates new context.
|
||||
|
||||

|
||||
|
||||
For each step, X-LoRA requires the base model to be run twice: first, to get hidden states without any LoRA adapters, and secondly, the hidden states are used to calculate scalings which are applied to the LoRA adapters and the model is run a second time. The output of the second run is the result of the model step.
|
||||
|
||||
Ultimately, X-LoRA allows the model to reflect upon its knowledge because of the dual forward pass scheme, and dynamically reconfigure the architecture.
|
||||
|
||||
## Low-Rank Hadamard Product (LoHa)
|
||||
|
||||
Low-rank decomposition can impact performance because the weight updates are limited to the low-rank space, which can constrain a model's expressiveness. However, you don't necessarily want to use a larger rank because it increases the number of trainable parameters. To address this, [LoHa](https://huggingface.co/papers/2108.06098) (a method originally developed for computer vision) was applied to diffusion models where the ability to generate diverse images is an important consideration. LoHa should also work with general model types, but the embedding layers aren't currently implemented in PEFT.
|
||||
|
@ -64,9 +64,9 @@ Take a look at [P-tuning for sequence classification](../task_guides/ptuning-seq
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/mpt.png"/>
|
||||
</div>
|
||||
<small><a href="https://hf.co/papers/2103.10385">Multitask prompt tuning enables parameter-efficient transfer learning</a>.</small>
|
||||
<small><a href="https://hf.co/papers/2303.02861">Multitask prompt tuning enables parameter-efficient transfer learning</a>.</small>
|
||||
|
||||
[Multitask prompt tuning (MPT)](https://hf.co/papers/2103.10385) learns a single prompt from data for multiple task types that can be shared for different target tasks. Other existing approaches learn a separate soft prompt for each task that need to be retrieved or aggregated for adaptation to target tasks. MPT consists of two stages:
|
||||
[Multitask prompt tuning (MPT)](https://hf.co/papers/2303.02861) learns a single prompt from data for multiple task types that can be shared for different target tasks. Other existing approaches learn a separate soft prompt for each task that need to be retrieved or aggregated for adaptation to target tasks. MPT consists of two stages:
|
||||
|
||||
1. source training - for each task, its soft prompt is decomposed into task-specific vectors. The task-specific vectors are multiplied together to form another matrix W, and the Hadamard product is used between W and a shared prompt matrix P to generate a task-specific prompt matrix. The task-specific prompts are distilled into a single prompt matrix that is shared across all tasks. This prompt is trained with multitask training.
|
||||
2. target adaptation - to adapt the single prompt for a target task, a target prompt is initialized and expressed as the Hadamard product of the shared prompt matrix and the task-specific low-rank prompt matrix.
|
||||
|
250
docs/source/developer_guides/checkpoint.md
Normal file
250
docs/source/developer_guides/checkpoint.md
Normal file
@ -0,0 +1,250 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# PEFT checkpoint format
|
||||
|
||||
This document describes how PEFT's checkpoint files are structured and how to convert between the PEFT format and other formats.
|
||||
|
||||
## PEFT files
|
||||
|
||||
PEFT (parameter-efficient fine-tuning) methods only update a small subset of a model's parameters rather than all of them. This is nice because checkpoint files can generally be much smaller than the original model files and are easier to store and share. However, this also means that to load a PEFT model, you need to have the original model available as well.
|
||||
|
||||
When you call [`~PeftModel.save_pretrained`] on a PEFT model, the PEFT model saves three files, described below:
|
||||
|
||||
1. `adapter_model.safetensors` or `adapter_model.bin`
|
||||
|
||||
By default, the model is saved in the `safetensors` format, a secure alternative to the `bin` format, which is known to be susceptible to [security vulnerabilities](https://huggingface.co/docs/hub/security-pickle) because it uses the pickle utility under the hood. Both formats store the same `state_dict` though, and are interchangeable.
|
||||
|
||||
The `state_dict` only contains the parameters of the adapter module, not the base model. To illustrate the difference in size, a normal BERT model requires ~420MB of disk space, whereas an IA³ adapter on top of this BERT model only requires ~260KB.
|
||||
|
||||
2. `adapter_config.json`
|
||||
|
||||
The `adapter_config.json` file contains the configuration of the adapter module, which is necessary to load the model. Below is an example of an `adapter_config.json` for an IA³ adapter with standard settings applied to a BERT model:
|
||||
|
||||
```json
|
||||
{
|
||||
"auto_mapping": {
|
||||
"base_model_class": "BertModel",
|
||||
"parent_library": "transformers.models.bert.modeling_bert"
|
||||
},
|
||||
"base_model_name_or_path": "bert-base-uncased",
|
||||
"fan_in_fan_out": false,
|
||||
"feedforward_modules": [
|
||||
"output.dense"
|
||||
],
|
||||
"inference_mode": true,
|
||||
"init_ia3_weights": true,
|
||||
"modules_to_save": null,
|
||||
"peft_type": "IA3",
|
||||
"revision": null,
|
||||
"target_modules": [
|
||||
"key",
|
||||
"value",
|
||||
"output.dense"
|
||||
],
|
||||
"task_type": null
|
||||
}
|
||||
```
|
||||
|
||||
The configuration file contains:
|
||||
|
||||
- the adapter module type stored, `"peft_type": "IA3"`
|
||||
- information about the base model like `"base_model_name_or_path": "bert-base-uncased"`
|
||||
- the revision of the model (if any), `"revision": null`
|
||||
|
||||
If the base model is not a pretrained Transformers model, the latter two entries will be `null`. Other than that, the settings are all related to the specific IA³ adapter that was used to fine-tune the model.
|
||||
|
||||
3. `README.md`
|
||||
|
||||
The generated `README.md` is the model card of a PEFT model and contains a few pre-filled entries. The intent of this is to make it easier to share the model with others and to provide some basic information about the model. This file is not needed to load the model.
|
||||
|
||||
## Convert to PEFT format
|
||||
|
||||
When converting from another format to the PEFT format, we require both the `adapter_model.safetensors` (or `adapter_model.bin`) file and the `adapter_config.json` file.
|
||||
|
||||
### adapter_model
|
||||
|
||||
For the model weights, it is important to use the correct mapping from parameter name to value for PEFT to load the file. Getting this mapping right is an exercise in checking the implementation details, as there is no generally agreed upon format for PEFT adapters.
|
||||
|
||||
Fortunately, figuring out this mapping is not overly complicated for common base cases. Let's look at a concrete example, the [`LoraLayer`](https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/layer.py):
|
||||
|
||||
```python
|
||||
# showing only part of the code
|
||||
|
||||
class LoraLayer(BaseTunerLayer):
|
||||
# All names of layers that may contain (trainable) adapter weights
|
||||
adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B")
|
||||
# All names of other parameters that may contain adapter-related parameters
|
||||
other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout")
|
||||
|
||||
def __init__(self, base_layer: nn.Module, **kwargs) -> None:
|
||||
self.base_layer = base_layer
|
||||
self.r = {}
|
||||
self.lora_alpha = {}
|
||||
self.scaling = {}
|
||||
self.lora_dropout = nn.ModuleDict({})
|
||||
self.lora_A = nn.ModuleDict({})
|
||||
self.lora_B = nn.ModuleDict({})
|
||||
# For Embedding layer
|
||||
self.lora_embedding_A = nn.ParameterDict({})
|
||||
self.lora_embedding_B = nn.ParameterDict({})
|
||||
# Mark the weight as unmerged
|
||||
self._disable_adapters = False
|
||||
self.merged_adapters = []
|
||||
self.use_dora: dict[str, bool] = {}
|
||||
self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA
|
||||
self._caches: dict[str, Any] = {}
|
||||
self.kwargs = kwargs
|
||||
```
|
||||
|
||||
In the `__init__` code used by all `LoraLayer` classes in PEFT, there are a bunch of parameters used to initialize the model, but only a few are relevant for the checkpoint file: `lora_A`, `lora_B`, `lora_embedding_A`, and `lora_embedding_B`. These parameters are listed in the class attribute `adapter_layer_names` and contain the learnable parameters, so they must be included in the checkpoint file. All the other parameters, like the rank `r`, are derived from the `adapter_config.json` and must be included there (unless the default value is used).
|
||||
|
||||
Let's check the `state_dict` of a PEFT LoRA model applied to BERT. When printing the first five keys using the default LoRA settings (the remaining keys are the same, just with different layer numbers), we get:
|
||||
|
||||
- `base_model.model.encoder.layer.0.attention.self.query.lora_A.weight`
|
||||
- `base_model.model.encoder.layer.0.attention.self.query.lora_B.weight`
|
||||
- `base_model.model.encoder.layer.0.attention.self.value.lora_A.weight`
|
||||
- `base_model.model.encoder.layer.0.attention.self.value.lora_B.weight`
|
||||
- `base_model.model.encoder.layer.1.attention.self.query.lora_A.weight`
|
||||
- etc.
|
||||
|
||||
Let's break this down:
|
||||
|
||||
- By default, for BERT models, LoRA is applied to the `query` and `value` layers of the attention module. This is why you see `attention.self.query` and `attention.self.value` in the key names for each layer.
|
||||
- LoRA decomposes the weights into two low-rank matrices, `lora_A` and `lora_B`. This is where `lora_A` and `lora_B` come from in the key names.
|
||||
- These LoRA matrices are implemented as `nn.Linear` layers, so the parameters are stored in the `.weight` attribute (`lora_A.weight`, `lora_B.weight`).
|
||||
- By default, LoRA isn't applied to BERT's embedding layer, so there are _no entries_ for `lora_embedding_A` and `lora_embedding_B`.
|
||||
- The keys of the `state_dict` always start with `"base_model.model."`. The reason is that, in PEFT, we wrap the base model inside a tuner-specific model (`LoraModel` in this case), which itself is wrapped in a general PEFT model (`PeftModel`). For this reason, these two prefixes are added to the keys. When converting to the PEFT format, it is required to add these prefixes.
|
||||
|
||||
<Tip>
|
||||
|
||||
This last point is not true for prefix tuning techniques like prompt tuning. There, the extra embeddings are directly stored in the `state_dict` without any prefixes added to the keys.
|
||||
|
||||
</Tip>
|
||||
|
||||
When inspecting the parameter names in the loaded model, you might be surprised to find that they look a bit different, e.g. `base_model.model.encoder.layer.0.attention.self.query.lora_A.default.weight`. The difference is the *`.default`* part in the second to last segment. This part exists because PEFT generally allows the addition of multiple adapters at once (using an `nn.ModuleDict` or `nn.ParameterDict` to store them). For example, if you add another adapter called "other", the key for that adapter would be `base_model.model.encoder.layer.0.attention.self.query.lora_A.other.weight`.
|
||||
|
||||
When you call [`~PeftModel.save_pretrained`], the adapter name is stripped from the keys. The reason is that the adapter name is not an important part of the model architecture; it is just an arbitrary name. When loading the adapter, you could choose a totally different name, and the model would still work the same way. This is why the adapter name is not stored in the checkpoint file.
|
||||
|
||||
<Tip>
|
||||
|
||||
If you call `save_pretrained("some/path")` and the adapter name is not `"default"`, the adapter is stored in a sub-directory with the same name as the adapter. So if the name is "other", it would be stored inside of `some/path/other`.
|
||||
|
||||
</Tip>
|
||||
|
||||
In some circumstances, deciding which values to add to the checkpoint file can become a bit more complicated. For example, in PEFT, DoRA is implemented as a special case of LoRA. If you want to convert a DoRA model to PEFT, you should create a LoRA checkpoint with extra entries for DoRA. You can see this in the `__init__` of the previous `LoraLayer` code:
|
||||
|
||||
```python
|
||||
self.lora_magnitude_vector: Optional[torch.nn.ParameterDict] = None # for DoRA
|
||||
```
|
||||
|
||||
This indicates that there is an optional extra parameter per layer for DoRA.
|
||||
|
||||
### adapter_config
|
||||
|
||||
All the other information needed to load a PEFT model is contained in the `adapter_config.json` file. Let's check this file for a LoRA model applied to BERT:
|
||||
|
||||
```json
|
||||
{
|
||||
"alpha_pattern": {},
|
||||
"auto_mapping": {
|
||||
"base_model_class": "BertModel",
|
||||
"parent_library": "transformers.models.bert.modeling_bert"
|
||||
},
|
||||
"base_model_name_or_path": "bert-base-uncased",
|
||||
"bias": "none",
|
||||
"fan_in_fan_out": false,
|
||||
"inference_mode": true,
|
||||
"init_lora_weights": true,
|
||||
"layer_replication": null,
|
||||
"layers_pattern": null,
|
||||
"layers_to_transform": null,
|
||||
"loftq_config": {},
|
||||
"lora_alpha": 8,
|
||||
"lora_dropout": 0.0,
|
||||
"megatron_config": null,
|
||||
"megatron_core": "megatron.core",
|
||||
"modules_to_save": null,
|
||||
"peft_type": "LORA",
|
||||
"r": 8,
|
||||
"rank_pattern": {},
|
||||
"revision": null,
|
||||
"target_modules": [
|
||||
"query",
|
||||
"value"
|
||||
],
|
||||
"task_type": null,
|
||||
"use_dora": false,
|
||||
"use_rslora": false
|
||||
}
|
||||
```
|
||||
|
||||
This contains a lot of entries, and at first glance, it could feel overwhelming to figure out all the right values to put in there. However, most of the entries are not necessary to load the model. This is either because they use the default values and don't need to be added or because they only affect the initialization of the LoRA weights, which is irrelevant when it comes to loading the model. If you find that you don't know what a specific parameter does, e.g., `"use_rslora",` don't add it, and you should be fine. Also note that as more options are added, this file will get more entries in the future, but it should be backward compatible.
|
||||
|
||||
At the minimum, you should include the following entries:
|
||||
|
||||
```json
|
||||
{
|
||||
"target_modules": ["query", "value"],
|
||||
"peft_type": "LORA"
|
||||
}
|
||||
```
|
||||
|
||||
However, adding as many entries as possible, like the rank `r` or the `base_model_name_or_path` (if it's a Transformers model) is recommended. This information can help others understand the model better and share it more easily. To check which keys and values are expected, check out the [config.py](https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/config.py) file (as an example, this is the config file for LoRA) in the PEFT source code.
|
||||
|
||||
## Model storage
|
||||
|
||||
In some circumstances, you might want to store the whole PEFT model, including the base weights. This can be necessary if, for instance, the base model is not available to the users trying to load the PEFT model. You can merge the weights first or convert it into a Transformer model.
|
||||
|
||||
### Merge the weights
|
||||
|
||||
The most straightforward way to store the whole PEFT model is to merge the adapter weights into the base weights:
|
||||
|
||||
```python
|
||||
merged_model = model.merge_and_unload()
|
||||
merged_model.save_pretrained(...)
|
||||
```
|
||||
|
||||
There are some disadvantages to this approach, though:
|
||||
|
||||
- Once [`~LoraModel.merge_and_unload`] is called, you get a basic model without any PEFT-specific functionality. This means you can't use any of the PEFT-specific methods anymore.
|
||||
- You cannot unmerge the weights, load multiple adapters at once, disable the adapter, etc.
|
||||
- Not all PEFT methods support merging weights.
|
||||
- Some PEFT methods may generally allow merging, but not with specific settings (e.g. when using certain quantization techniques).
|
||||
- The whole model will be much larger than the PEFT model, as it will contain all the base weights as well.
|
||||
|
||||
But inference with a merged model should be a bit faster.
|
||||
|
||||
### Convert to a Transformers model
|
||||
|
||||
Another way to save the whole model, assuming the base model is a Transformers model, is to use this hacky approach to directly insert the PEFT weights into the base model and save it, which only works if you "trick" Transformers into believing the PEFT model is not a PEFT model. This only works with LoRA because other adapters are not implemented in Transformers.
|
||||
|
||||
```python
|
||||
model = ... # the PEFT model
|
||||
...
|
||||
# after you finish training the model, save it in a temporary location
|
||||
model.save_pretrained(<temp_location>)
|
||||
# now load this model directly into a transformers model, without the PEFT wrapper
|
||||
# the PEFT weights are directly injected into the base model
|
||||
model_loaded = AutoModel.from_pretrained(<temp_location>)
|
||||
# now make the loaded model believe that it is _not_ a PEFT model
|
||||
model_loaded._hf_peft_config_loaded = False
|
||||
# now when we save it, it will save the whole model
|
||||
model_loaded.save_pretrained(<final_location>)
|
||||
# or upload to Hugging Face Hub
|
||||
model_loaded.push_to_hub(<final_location>)
|
||||
```
|
||||
|
@ -238,3 +238,73 @@ peft_model.print_trainable_parameters()
|
||||
```python
|
||||
print(peft_model.targeted_module_names)
|
||||
```
|
||||
|
||||
## Unsupported module types
|
||||
|
||||
Methods like LoRA only work if the target modules are supported by PEFT. For example, it's possible to apply LoRA to `nn.Linear` and `nn.Conv2d` layers, but not, for instance, to `nn.LSTM`. If you find a layer class you want to apply PEFT to is not supported, you can:
|
||||
|
||||
- define a custom mapping to dynamically dispatch custom modules in LoRA
|
||||
- open an [issue](https://github.com/huggingface/peft/issues) and request the feature where maintainers will implement it or guide you on how to implement it yourself if demand for this module type is sufficiently high
|
||||
|
||||
### Experimental support for dynamic dispatch of custom modules in LoRA
|
||||
|
||||
> [!WARNING]
|
||||
> This feature is experimental and subject to change, depending on its reception by the community. We will introduce a public and stable API if there is significant demand for it.
|
||||
|
||||
PEFT supports an experimental API for custom module types for LoRA. Let's assume you have a LoRA implementation for LSTMs. Normally, you would not be able to tell PEFT to use it, even if it would theoretically work with PEFT. However, this is possible with dynamic dispatch of custom layers.
|
||||
|
||||
The experimental API currently looks like this:
|
||||
|
||||
```python
|
||||
class MyLoraLSTMLayer:
|
||||
...
|
||||
|
||||
base_model = ... # load the base model that uses LSTMs
|
||||
|
||||
# add the LSTM layer names to target_modules
|
||||
config = LoraConfig(..., target_modules=["lstm"])
|
||||
# define a mapping from base layer type to LoRA layer type
|
||||
custom_module_mapping = {nn.LSTM: MyLoraLSTMLayer}
|
||||
# register the new mapping
|
||||
config._register_custom_module(custom_module_mapping)
|
||||
# after registration, create the PEFT model
|
||||
peft_model = get_peft_model(base_model, config)
|
||||
# do training
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
When you call [`get_peft_model`], you will see a warning because PEFT does not recognize the targeted module type. In this case, you can ignore this warning.
|
||||
|
||||
</Tip>
|
||||
|
||||
By supplying a custom mapping, PEFT first checks the base model's layers against the custom mapping and dispatches to the custom LoRA layer type if there is a match. If there is no match, PEFT checks the built-in LoRA layer types for a match.
|
||||
|
||||
Therefore, this feature can also be used to override existing dispatch logic, e.g. if you want to use your own LoRA layer for `nn.Linear` instead of using the one provided by PEFT.
|
||||
|
||||
When creating your custom LoRA module, please follow the same rules as the [existing LoRA modules](https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/layer.py). Some important constraints to consider:
|
||||
|
||||
- The custom module should inherit from `nn.Module` and `peft.tuners.lora.layer.LoraLayer`.
|
||||
- The `__init__` method of the custom module should have the positional arguments `base_layer` and `adapter_name`. After this, there are additional `**kwargs` that you are free to use or ignore.
|
||||
- The learnable parameters should be stored in an `nn.ModuleDict` or `nn.ParameterDict`, where the key corresponds to the name of the specific adapter (remember that a model can have more than one adapter at a time).
|
||||
- The name of these learnable parameter attributes should start with `"lora_"`, e.g. `self.lora_new_param = ...`.
|
||||
- Some methods are optional, e.g. you only need to implement `merge` and `unmerge` if you want to support weight merging.
|
||||
|
||||
Currently, the information about the custom module does not persist when you save the model. When loading the model, you have to register the custom modules again.
|
||||
|
||||
```python
|
||||
# saving works as always and includes the parameters of the custom modules
|
||||
peft_model.save_pretrained(<model-path>)
|
||||
|
||||
# loading the model later:
|
||||
base_model = ...
|
||||
# load the LoRA config that you saved earlier
|
||||
config = LoraConfig.from_pretrained(<model-path>)
|
||||
# register the custom module again, the same way as the first time
|
||||
custom_module_mapping = {nn.LSTM: MyLoraLSTMLayer}
|
||||
config._register_custom_module(custom_module_mapping)
|
||||
# pass the config instance to from_pretrained:
|
||||
peft_model = PeftModel.from_pretrained(model, tmp_path / "lora-custom-module", config=config)
|
||||
```
|
||||
|
||||
If you use this feature and find it useful, or if you encounter problems, let us know by creating an issue or a discussion on GitHub. This allows us to estimate the demand for this feature and add a public API if it is sufficiently high.
|
||||
|
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
@ -40,6 +40,29 @@ from peft import LoraConfig
|
||||
config = LoraConfig(init_lora_weights=False, ...)
|
||||
```
|
||||
|
||||
### PiSSA
|
||||
[PiSSA](https://arxiv.org/abs/2404.02948) initializes the LoRA adapter using the principal singular values and singular vectors. This straightforward modification allows PiSSA to converge more rapidly than LoRA and ultimately attain superior performance. Moreover, PiSSA reduces the quantization error compared to QLoRA, leading to further enhancements.
|
||||
|
||||
Configure the initialization method to "pissa", which may take several minutes to execute SVD on the pre-trained model:
|
||||
```python
|
||||
from peft import LoraConfig
|
||||
config = LoraConfig(init_lora_weights="pissa", ...)
|
||||
```
|
||||
Alternatively, execute fast SVD, which takes only a few seconds. The number of iterations determines the trade-off between the error and computation time:
|
||||
```python
|
||||
lora_config = LoraConfig(init_lora_weights="pissa_niter_[number of iters]", ...)
|
||||
```
|
||||
For detailed instruction on using PiSSA, please follow [these instructions](https://github.com/fxmeng/peft/tree/main/examples/pissa_finetuning).
|
||||
|
||||
### OLoRA
|
||||
[OLoRA](https://arxiv.org/abs/2406.01775) utilizes QR decomposition to initialize the LoRA adapters. OLoRA translates the base weights of the model by a factor of their QR decompositions, i.e., it mutates the weights before performing any training on them. This approach significantly improves stability, accelerates convergence speed, and ultimately achieves superior performance.
|
||||
|
||||
You just need to pass a single additional option to use OLoRA:
|
||||
```python
|
||||
from peft import LoraConfig
|
||||
config = LoraConfig(init_lora_weights="olora", ...)
|
||||
```
|
||||
For more advanced usage, please refer to our [documentation](https://github.com/huggingface/peft/tree/main/examples/olora_finetuning).
|
||||
### LoftQ
|
||||
|
||||
#### Standard approach
|
||||
@ -48,7 +71,7 @@ When quantizing the base model for QLoRA training, consider using the [LoftQ ini
|
||||
|
||||
In general, for LoftQ to work best, it is recommended to target as many layers with LoRA as possible, since those not targeted cannot have LoftQ applied. This means that passing `LoraConfig(..., target_modules="all-linear")` will most likely give the best results. Also, you should use `nf4` as quant type in your quantization config when using 4bit quantization, i.e. `BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")`.
|
||||
|
||||
#### A more convienient way
|
||||
#### A more convenient way
|
||||
|
||||
An easier but more limited way to apply LoftQ initialization is to use the convenience function `replace_lora_weights_loftq`. This takes the quantized PEFT model as input and replaces the LoRA weights in-place with their LoftQ-initialized counterparts.
|
||||
|
||||
@ -99,9 +122,25 @@ from peft import LoraConfig
|
||||
config = LoraConfig(use_dora=True, ...)
|
||||
```
|
||||
|
||||
If parts of the model or the DoRA adapter are offloaded to CPU you can get a significant speedup at the cost of some temporary (ephemeral) VRAM overhead by using `ephemeral_gpu_offload=True` in `config.runtime_config`.
|
||||
|
||||
```py
|
||||
from peft import LoraConfig, LoraRuntimeConfig
|
||||
|
||||
config = LoraConfig(use_dora=True, runtime_config=LoraRuntimeConfig(ephemeral_gpu_offload=True), ...)
|
||||
```
|
||||
|
||||
A `PeftModel` with a DoRA adapter can also be loaded with `ephemeral_gpu_offload=True` flag using the `from_pretrained` method as well as the `load_adapter` method.
|
||||
|
||||
```py
|
||||
from peft import PeftModel
|
||||
|
||||
model = PeftModel.from_pretrained(base_model, peft_model_id, ephemeral_gpu_offload=True)
|
||||
```
|
||||
|
||||
#### Caveats
|
||||
|
||||
- DoRA only supports linear and Conv2d layers at the momement.
|
||||
- DoRA only supports linear and Conv2d layers at the moment.
|
||||
- DoRA introduces a bigger overhead than pure LoRA, so it is recommended to merge weights for inference, see [`LoraModel.merge_and_unload`].
|
||||
- DoRA should work with weights quantized with bitsandbytes ("QDoRA"). However, issues have been reported when using QDoRA with DeepSpeed Zero2.
|
||||
|
||||
@ -121,15 +160,56 @@ An approach used to improve the performance of models is to expand a model by du
|
||||
config = LoraConfig(layer_replication=[[0,4], [2,5]], ...)
|
||||
```
|
||||
|
||||
Assuming the original model had 5 layers `[0, 1, 2 ,3, 4]`, this would create a model with 7 layers arranged as `[0, 1, 2, 3, 2, 3, 4]`. This follows the [mergekit](https://github.com/arcee-ai/mergekit) pass through merge convention where sequences of layers specified as start inclusive and end exclusive tuples are stacked to build the final model. Each layer in the final model gets its own distinct set of LoRA adpaters.
|
||||
Assuming the original model had 5 layers `[0, 1, 2 ,3, 4]`, this would create a model with 7 layers arranged as `[0, 1, 2, 3, 2, 3, 4]`. This follows the [mergekit](https://github.com/arcee-ai/mergekit) pass through merge convention where sequences of layers specified as start inclusive and end exclusive tuples are stacked to build the final model. Each layer in the final model gets its own distinct set of LoRA adapters.
|
||||
|
||||
[Fewshot-Metamath-OrcaVicuna-Mistral-10B](https://huggingface.co/abacusai/Fewshot-Metamath-OrcaVicuna-Mistral-10B) is an example of a model trained using this method on Mistral-7B expanded to 10B. The
|
||||
[adapter_config.json](https://huggingface.co/abacusai/Fewshot-Metamath-OrcaVicuna-Mistral-10B/blob/main/adapter_config.json) shows a sample LoRA adapter config applying this method for fine-tuning.
|
||||
|
||||
## Merge adapters
|
||||
## Optimizers
|
||||
|
||||
LoRA training can optionally include special purpose optimizers. Currently the only such optimizer is LoRA+.
|
||||
|
||||
### LoRA+ optimized LoRA
|
||||
|
||||
LoRA training can be optimized using [LoRA+](https://arxiv.org/abs/2402.12354), which uses different learning rates for the adapter matrices A and B, shown to increase finetuning speed by up to 2x and performance by 1-2%.
|
||||
|
||||
```py
|
||||
from peft import LoraConfig, get_peft_model
|
||||
from peft.optimizers import create_loraplus_optimizer
|
||||
from transformers import Trainer
|
||||
import bitsandbytes as bnb
|
||||
|
||||
base_model = ...
|
||||
config = LoraConfig(...)
|
||||
model = get_peft_model(base_model, config)
|
||||
|
||||
optimizer = create_loraplus_optimizer(
|
||||
model=model,
|
||||
optimizer_cls=bnb.optim.Adam8bit,
|
||||
lr=5e-5,
|
||||
loraplus_lr_ratio=16,
|
||||
)
|
||||
scheduler = None
|
||||
|
||||
...
|
||||
trainer = Trainer(
|
||||
...,
|
||||
optimizers=(optimizer, scheduler),
|
||||
)
|
||||
```
|
||||
|
||||
## Merge LoRA weights into the base model
|
||||
|
||||
While LoRA is significantly smaller and faster to train, you may encounter latency issues during inference due to separately loading the base model and the LoRA adapter. To eliminate latency, use the [`~LoraModel.merge_and_unload`] function to merge the adapter weights with the base model. This allows you to use the newly merged model as a standalone model. The [`~LoraModel.merge_and_unload`] function doesn't keep the adapter weights in memory.
|
||||
|
||||
Below is a diagram that explains the intuition of LoRA adapter merging:
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/lora_diagram.png"/>
|
||||
</div>
|
||||
|
||||
We show in the snippets below how to run that using PEFT.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
from peft import PeftModel
|
||||
@ -244,7 +324,7 @@ model.delete_adapter("dpo")
|
||||
|
||||
Normally, each inference batch has to use the same adapter(s) in PEFT. This can sometimes be annoying, because we may have batches that contain samples intended to be used with different LoRA adapters. For example, we could have a base model that works well in English and two more LoRA adapters, one for French and one for German. Usually, we would have to split our batches such that each batch only contains samples of one of the languages, we cannot combine different languages in the same batch.
|
||||
|
||||
Thankfully, it is possible to mix different LoRA adapters in the same batch using the `adapter_name` argument. Below, we show an examle of how this works in practice. First, let's load the base model, English, and the two adapters, French and German, like this:
|
||||
Thankfully, it is possible to mix different LoRA adapters in the same batch using the `adapter_name` argument. Below, we show an example of how this works in practice. First, let's load the base model, English, and the two adapters, French and German, like this:
|
||||
|
||||
```python
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
@ -289,6 +369,8 @@ output = peft_model.generate(**inputs, adapter_names=adapter_names, max_new_toke
|
||||
|
||||
Note that the order does not matter here, i.e. the samples in the batch don't need to be grouped by adapter as in the example above. We just need to ensure that the `adapter_names` argument is aligned correctly with the samples.
|
||||
|
||||
Additionally, the same approach also works with the `modules_to_save` feature, which allows for saving and reusing specific neural network layers, such as custom heads for classification tasks, across different LoRA adapters.
|
||||
|
||||
### Caveats
|
||||
|
||||
Using this feature has some drawbacks, namely:
|
||||
@ -298,6 +380,7 @@ Using this features has some drawbacks, namely:
|
||||
- You cannot pass `adapter_names` when some adapter weights were merged with the base weights using the `merge_adapter` method. Please unmerge all adapters first by calling `model.unmerge_adapter()`.
|
||||
- For obvious reasons, this cannot be used after calling `merge_and_unload()`, since all the LoRA adapters will be merged into the base weights in this case.
|
||||
- This feature does not currently work with DoRA, so set `use_dora=False` in your `LoraConfig` if you want to use it.
|
||||
- The `modules_to_save` feature is currently only supported for the layers of types `Linear`, `Embedding`, `Conv2d` and `Conv1d`.
|
||||
- There is an expected overhead for inference with `adapter_names`, especially if the number of different adapters in the batch is high. This is because the batch size is effectively reduced to the number of samples per adapter. If runtime performance is your top priority, try the following:
|
||||
- Increase the batch size.
|
||||
  - Try to avoid having a large number of different adapters in the same batch, prefer homogeneous batches. This can be achieved by buffering samples with the same adapter and only performing inference with a small handful of different adapters.
|
||||
|
@ -25,6 +25,8 @@ Check the table below to see when you should inject adapters.
|
||||
| the model is modified inplace, keeping all the original attributes and methods | manually write the `from_pretrained` and `save_pretrained` utility functions from Hugging Face to save and load adapters |
|
||||
| works for any `torch` module and modality | doesn't work with any of the utility methods provided by `PeftModel` such as disabling and merging adapters |
|
||||
|
||||
## Creating a new PEFT model
|
||||
|
||||
To perform the adapter injection, use the [`inject_adapter_in_model`] method. This method takes 3 arguments, the PEFT config, the model, and an optional adapter name. You can also attach multiple adapters to the model if you call [`inject_adapter_in_model`] multiple times with different adapter names.
|
||||
|
||||
For example, to inject LoRA adapters into the `linear` submodule of the `DummyModel` module:
|
||||
@ -85,6 +87,8 @@ DummyModel(
|
||||
)
|
||||
```
|
||||
|
||||
## Saving the model
|
||||
|
||||
To only save the adapter, use the [`get_peft_model_state_dict`] function:
|
||||
|
||||
```python
|
||||
@ -95,3 +99,28 @@ print(peft_state_dict)
|
||||
```
|
||||
|
||||
Otherwise, `model.state_dict()` returns the full state dict of the model.
|
||||
|
||||
## Loading the model
|
||||
|
||||
After loading the saved `state_dict`, it can be applied using the [`set_peft_model_state_dict`] function:
|
||||
|
||||
```python
|
||||
from peft import set_peft_model_state_dict
|
||||
|
||||
model = DummyModel()
|
||||
model = inject_adapter_in_model(lora_config, model)
|
||||
outcome = set_peft_model_state_dict(model, peft_state_dict)
|
||||
# check that there were no wrong keys
|
||||
print(outcome.unexpected_keys)
|
||||
```
|
||||
|
||||
If injecting the adapter is slow or you need to load a large number of adapters, you may use an optimization that allows to create an "empty" adapter on meta device and only fills the weights with real weights when the [`set_peft_model_state_dict`] is called. To do this, pass `low_cpu_mem_usage=True` to both [`inject_adapter_in_model`] and [`set_peft_model_state_dict`].
|
||||
|
||||
```python
|
||||
model = DummyModel()
|
||||
model = inject_adapter_in_model(lora_config, model, low_cpu_mem_usage=True)
|
||||
|
||||
print(model.linear.lora_A["default"].weight.device.type == "meta") # should be True
|
||||
set_peft_model_state_dict(model, peft_state_dict, low_cpu_mem_usage=True)
|
||||
print(model.linear.lora_A["default"].weight.device.type == "cpu") # should be True
|
||||
```
|
||||
|
@ -138,3 +138,20 @@ print(tokenizer.decode(outputs[0]))
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
|
||||
## Merging (IA)³ Models
|
||||
The (IA)³ models facilitate linear merging of adapters. To merge adapters in an (IA)³ model, utilize the `add_weighted_adapter` method from the `IA3Model` class. This method is analogous to the `add_weighted_adapter` method used in `LoraModel`, with the key difference being the absence of the `combination_type` parameter. For example, to merge three (IA)³ adapters into a PEFT model, you would proceed as follows:
|
||||
|
||||
```py
|
||||
adapters = ["adapter1", "adapter2", "adapter3"]
|
||||
weights = [0.4, 0.3, 0.3]
|
||||
adapter_name = "merge"
|
||||
model.add_weighted_adapter(adapters, weights, adapter_name)
|
||||
```
|
||||
|
||||
It is recommended that the weights sum to 1.0 to preserve the scale of the model. The merged model can then be set as the active model using the `set_adapter` method:
|
||||
|
||||
```py
|
||||
model.set_adapter("merge")
|
||||
```
|
||||
|
@ -128,6 +128,65 @@ quantized_model = get_peft_model(quantized_model, peft_config)
|
||||
|
||||
You can refer to the [Google Colab](https://colab.research.google.com/drive/12GTp1FCj5_0SnnNQH18h_2XFh9vS_guX?usp=sharing) example for an overview of AQLM+LoRA finetuning.
|
||||
|
||||
## EETQ quantization
|
||||
|
||||
You can also perform LoRA fine-tuning on EETQ quantized models. The [EETQ](https://github.com/NetEase-FuXi/EETQ) package offers a simple and efficient way to perform 8-bit quantization, which is claimed to be faster than the `LLM.int8()` algorithm. First, make sure that you have a transformers version that is compatible with EETQ (e.g. by installing it from latest pypi or from source).
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import EetqConfig
|
||||
|
||||
config = EetqConfig("int8")
|
||||
```
|
||||
|
||||
Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config)
|
||||
```
|
||||
|
||||
and create a `LoraConfig` and pass it to `get_peft_model`:
|
||||
|
||||
```py
|
||||
from peft import LoraConfig, get_peft_model
|
||||
|
||||
config = LoraConfig(
|
||||
r=16,
|
||||
lora_alpha=8,
|
||||
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
|
||||
lora_dropout=0.05,
|
||||
bias="none",
|
||||
task_type="CAUSAL_LM"
|
||||
)
|
||||
|
||||
model = get_peft_model(model, config)
|
||||
```
|
||||
|
||||
## HQQ quantization
|
||||
|
||||
Models that are quantized using Half-Quadratic Quantization of Large Machine Learning Models ([HQQ](https://mobiusml.github.io/hqq_blog/)) support LoRA adapter tuning. To tune the quantized model, you'll need to install the `hqq` library with: `pip install hqq`.
|
||||
|
||||
```python
|
||||
from hqq.engine.hf import HQQModelForCausalLM
|
||||
|
||||
quantized_model = HQQModelForCausalLM.from_quantized(save_dir_or_hfhub, device='cuda')
|
||||
peft_config = LoraConfig(...)
|
||||
quantized_model = get_peft_model(quantized_model, peft_config)
|
||||
```
|
||||
|
||||
Alternatively, use a transformers version that is compatible with HQQ (e.g. by installing it from latest pypi or from source).
|
||||
|
||||
```python
|
||||
from transformers import HqqConfig, AutoModelForCausalLM
|
||||
|
||||
quant_config = HqqConfig(nbits=4, group_size=64)
|
||||
quantized_model = AutoModelForCausalLM.from_pretrained(save_dir_or_hfhub, device_map=device_map, quantization_config=quant_config)
|
||||
peft_config = LoraConfig(...)
|
||||
quantized_model = get_peft_model(quantized_model, peft_config)
|
||||
```
|
||||
|
||||
## Next steps
|
||||
|
||||
If you're interested in learning more about quantization, the following may be helpful:
|
||||
|
76
docs/source/developer_guides/torch_compile.md
Normal file
76
docs/source/developer_guides/torch_compile.md
Normal file
@ -0,0 +1,76 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# torch.compile
|
||||
|
||||
In PEFT, [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) works for some but not all features. The reason why it won't always work is because PEFT is highly dynamic in certain places (loading and switching between multiple adapters, for instance), which can cause trouble for `torch.compile`. In other places, `torch.compile` may work, but won't be as fast as expected because of graph breaks.
|
||||
|
||||
If you don't see an error, it doesn't necessarily mean that `torch.compile` worked correctly. It might give you an output, but the output is incorrect. This guide describes what works with `torch.compile` and what doesn't.
|
||||
|
||||
> [!TIP]
|
||||
> Unless indicated otherwise, the default `torch.compile` settings were used.
|
||||
|
||||
## Training and inference with `torch.compile`
|
||||
|
||||
These features **work** with `torch.compile`. Everything listed below was tested with a causal LM:
|
||||
|
||||
- Training with `Trainer` from 🤗 transformers
|
||||
- Training with a custom PyTorch loop
|
||||
- Inference
|
||||
- Generation
|
||||
|
||||
The following adapters were tested successfully:
|
||||
|
||||
- AdaLoRA
|
||||
- BOFT
|
||||
- IA³
|
||||
- Layer Norm Tuning
|
||||
- LoHa
|
||||
- LoRA
|
||||
- LoRA + DoRA
|
||||
- OFT
|
||||
- VeRA
|
||||
- HRA
|
||||
|
||||
The following adapters **don't work** correctly for training or inference when using `torch.compile`:
|
||||
|
||||
- LoKr
|
||||
- LoRA targeting embedding layers
|
||||
|
||||
## Advanced PEFT features with `torch.compile`
|
||||
|
||||
Below are some of the more advanced PEFT features that **work**. They were all tested with LoRA.
|
||||
|
||||
- `modules_to_save` (i.e. `config = LoraConfig(..., modules_to_save=...)`)
|
||||
- Merging adapters (one or multiple)
|
||||
- Merging multiple adapters into one adapter (i.e. calling `model.add_weighted_adapter(...)`)
|
||||
|
||||
Generally, we can expect that if a feature works correctly with LoRA and is also supported by other adapter types, it should also work for that adapter type.
|
||||
|
||||
The more advanced PEFT features below **don't work** in conjunction with `torch.compile`. Tests were run with LoRA:
|
||||
|
||||
- Using PEFT adapters with quantization (bitsandbytes)
|
||||
- Inference with multiple adapters
|
||||
- Unloading (i.e. calling `model.merge_and_unload()`)
|
||||
- Disabling adapters (i.e. using `with model.disable_adapter()`)
|
||||
- Mixed adapter batches (i.e. calling `model(batch, adapter_names=["__base__", "default", "other", ...])`)
|
||||
|
||||
## Test cases
|
||||
|
||||
All the use cases listed above are tested inside of [`peft/tests/test_torch_compile.py`](https://github.com/huggingface/peft/blob/main/tests/test_torch_compile.py). If you want to check in more detail how we tested a certain feature, please go to that file and check the test that corresponds to your use case.
|
||||
|
||||
> [!TIP]
|
||||
> If you have another use case where you know that `torch.compile` does or does not work with PEFT, please contribute by letting us know or by opening a PR to add this use case to the covered test cases.
|
@ -69,6 +69,12 @@ trainer = Trainer(model=peft_model, fp16=True, ...)
|
||||
trainer.train()
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
Starting from PEFT version v0.12.0, PEFT automatically promotes the dtype of adapter weights from `torch.float16` and `torch.bfloat16` to `torch.float32` where appropriate. To _prevent_ this behavior, you can pass `autocast_adapter_dtype=False` to [`~get_peft_model`], to [`~PeftModel.from_pretrained`], and to [`~PeftModel.load_adapter`].
|
||||
|
||||
</Tip>
|
||||
|
||||
## Bad results from a loaded PEFT model
|
||||
|
||||
There can be several reasons for getting a poor result from a loaded PEFT model which are listed below. If you're still unable to troubleshoot the problem, see if anyone else had a similar [issue](https://github.com/huggingface/peft/issues) on GitHub, and if you can't find any, open a new issue.
|
||||
@ -129,9 +135,152 @@ If the model's embedding layer doesn't follow the Transformer's naming scheme, y
|
||||
```python
|
||||
model = get_peft_model(...)
|
||||
# train the model
|
||||
model.save_adapter("my_adapter", save_embedding_layers=True)
|
||||
model.save_pretrained("my_adapter", save_embedding_layers=True)
|
||||
```
|
||||
|
||||
For inference, load the base model first and resize it the same way you did before you trained the model. After you've resized the base model, you can load the PEFT checkpoint.
|
||||
|
||||
For a complete example, please check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_with_additional_tokens.ipynb).
|
||||
|
||||
### Check layer and model status
|
||||
|
||||
Sometimes a PEFT model can end up in a bad state, especially when handling multiple adapters. There can be some confusion around what adapters exist, which one is active, which one is merged, etc. To help investigate this issue, call the [`~peft.PeftModel.get_layer_status`] and the [`~peft.PeftModel.get_model_status`] methods.
|
||||
|
||||
The [`~peft.PeftModel.get_layer_status`] method gives you a detailed overview of each targeted layer's active, merged, and available adapters.
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoModel
|
||||
>>> from peft import get_peft_model, LoraConfig
|
||||
|
||||
>>> model_id = "google/flan-t5-small"
|
||||
>>> model = AutoModel.from_pretrained(model_id)
|
||||
>>> model = get_peft_model(model, LoraConfig())
|
||||
|
||||
>>> model.get_layer_status()
|
||||
[TunerLayerStatus(name='model.encoder.block.0.layer.0.SelfAttention.q',
|
||||
module_type='lora.Linear',
|
||||
enabled=True,
|
||||
active_adapters=['default'],
|
||||
merged_adapters=[],
|
||||
requires_grad={'default': True},
|
||||
available_adapters=['default']),
|
||||
TunerLayerStatus(name='model.encoder.block.0.layer.0.SelfAttention.v',
|
||||
module_type='lora.Linear',
|
||||
enabled=True,
|
||||
active_adapters=['default'],
|
||||
merged_adapters=[],
|
||||
requires_grad={'default': True},
|
||||
available_adapters=['default']),
|
||||
...]
|
||||
|
||||
>>> model.get_model_status()
|
||||
TunerModelStatus(
|
||||
base_model_type='T5Model',
|
||||
adapter_model_type='LoraModel',
|
||||
peft_types={'default': 'LORA'},
|
||||
trainable_params=344064,
|
||||
total_params=60855680,
|
||||
num_adapter_layers=48,
|
||||
enabled=True,
|
||||
active_adapters=['default'],
|
||||
merged_adapters=[],
|
||||
requires_grad={'default': True},
|
||||
available_adapters=['default'],
|
||||
)
|
||||
```
|
||||
|
||||
In the model state output, you should look out for entries that say `"irregular"`. This means PEFT detected an inconsistent state in the model. For instance, if `merged_adapters="irregular"`, it means that for at least one adapter, it was merged on some target modules but not on others. The inference results will most likely be incorrect as a result.
|
||||
|
||||
The best way to resolve this issue is to reload the whole model and adapter checkpoint(s). Ensure that you don't perform any incorrect operations on the model, e.g. manually merging adapters on some modules but not others.
|
||||
|
||||
Convert the layer status into a pandas `DataFrame` for an easier visual inspection.
|
||||
|
||||
```python
|
||||
from dataclasses import asdict
|
||||
import pandas as pd
|
||||
|
||||
df = pd.DataFrame(asdict(layer) for layer in model.get_layer_status())
|
||||
```
|
||||
|
||||
It is possible to get this information for non-PEFT models if they are using PEFT layers under the hood, but some information like the `base_model_type` or the `peft_types` cannot be determined in that case. As an example, you can call this on a [diffusers](https://huggingface.co/docs/diffusers/index) model like so:
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from diffusers import StableDiffusionPipeline
|
||||
>>> from peft import get_model_status, get_layer_status
|
||||
|
||||
>>> path = "runwayml/stable-diffusion-v1-5"
|
||||
>>> lora_id = "takuma104/lora-test-text-encoder-lora-target"
|
||||
>>> pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
|
||||
>>> pipe.load_lora_weights(lora_id, adapter_name="adapter-1")
|
||||
>>> pipe.load_lora_weights(lora_id, adapter_name="adapter-2")
|
||||
>>> pipe.set_lora_device(["adapter-2"], "cuda")
|
||||
>>> get_layer_status(pipe.text_encoder)
|
||||
[TunerLayerStatus(name='text_model.encoder.layers.0.self_attn.k_proj',
|
||||
module_type='lora.Linear',
|
||||
enabled=True,
|
||||
active_adapters=['adapter-2'],
|
||||
merged_adapters=[],
|
||||
requires_grad={'adapter-1': False, 'adapter-2': True},
|
||||
available_adapters=['adapter-1', 'adapter-2'],
|
||||
devices={'adapter-1': ['cpu'], 'adapter-2': ['cuda']}),
|
||||
TunerLayerStatus(name='text_model.encoder.layers.0.self_attn.v_proj',
|
||||
module_type='lora.Linear',
|
||||
enabled=True,
|
||||
active_adapters=['adapter-2'],
|
||||
merged_adapters=[],
|
||||
requires_grad={'adapter-1': False, 'adapter-2': True},
|
||||
devices={'adapter-1': ['cpu'], 'adapter-2': ['cuda']}),
|
||||
...]
|
||||
|
||||
>>> get_model_status(pipe.unet)
|
||||
TunerModelStatus(
|
||||
base_model_type='other',
|
||||
adapter_model_type='None',
|
||||
peft_types={},
|
||||
trainable_params=797184,
|
||||
total_params=861115332,
|
||||
num_adapter_layers=128,
|
||||
enabled=True,
|
||||
active_adapters=['adapter-2'],
|
||||
merged_adapters=[],
|
||||
requires_grad={'adapter-1': False, 'adapter-2': True},
|
||||
available_adapters=['adapter-1', 'adapter-2'],
|
||||
devices={'adapter-1': ['cpu'], 'adapter-2': ['cuda']},
|
||||
)
|
||||
```
|
||||
|
||||
## Speed
|
||||
|
||||
### Loading adapter weights is slow
|
||||
|
||||
Loading adapters like LoRA weights should generally be fast compared to loading the base model. However, there can be use cases where the adapter weights are quite large or where users need to load a large number of adapters -- the loading time can add up in this case. The reason for this is that the adapter weights are first initialized and then overridden by the loaded weights, which is wasteful. To speed up the loading time, you can pass the `low_cpu_mem_usage=True` argument to [`~PeftModel.from_pretrained`] and [`~PeftModel.load_adapter`].
|
||||
|
||||
<Tip>
|
||||
|
||||
If this option works well across different use cases, it may become the default for adapter loading in the future.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
## Reproducibility
|
||||
|
||||
### Models using batch norm
|
||||
|
||||
When loading a trained PEFT model where the base model uses batch norm (e.g. `torch.nn.BatchNorm1d` or `torch.nn.BatchNorm2d`), you may find that you cannot reproduce the exact same outputs. This is because the batch norm layers keep track of running stats during training, but these stats are not part of the PEFT checkpoint. Therefore, when you load the PEFT model, the running stats of the base model will be used (i.e. from before training with PEFT).
|
||||
|
||||
Depending on your use case, this may not be a big deal. If, however, you need your outputs to be 100% reproducible, you can achieve this by adding the batch norm layers to `modules_to_save`. Below is an example of this using resnet and LoRA. Notice that we set `modules_to_save=["classifier", "normalization"]`. We need the `"classifier"` argument because our task is image classification, and we add the `"normalization"` argument to ensure that the batch norm layers are saved in the PEFT checkpoint.
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForImageClassification
|
||||
from peft import LoraConfig, get_peft_model
|
||||
|
||||
model_id = "microsoft/resnet-18"
|
||||
base_model = AutoModelForImageClassification.from_pretrained(model_id)
|
||||
config = LoraConfig(
|
||||
target_modules=["convolution"],
|
||||
modules_to_save=["classifier", "normalization"],
|
||||
)
|
||||
```
|
||||
|
||||
Depending on the type of model you use, the batch norm layers could have different names than `"normalization"`, so please ensure that the name matches your model architecture.
|
||||
|
38
docs/source/package_reference/fourierft.md
Normal file
38
docs/source/package_reference/fourierft.md
Normal file
@ -0,0 +1,38 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# FourierFT: Discrete Fourier Transformation Fine-Tuning
|
||||
|
||||
[FourierFT](https://huggingface.co/papers/2405.03003) is a parameter-efficient fine-tuning technique that leverages Discrete Fourier Transform to compress the model's tunable weights. This method outperforms LoRA on the GLUE benchmark and common ViT classification tasks using far fewer parameters.
|
||||
|
||||
FourierFT currently has the following constraints:
|
||||
|
||||
- Only `nn.Linear` layers are supported.
|
||||
- Quantized layers are not supported.
|
||||
|
||||
If these constraints don't work for your use case, consider other methods instead.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
> Low-rank adaptation (LoRA) has recently gained much interest in fine-tuning foundation models. It effectively reduces the number of trainable parameters by incorporating low-rank matrices A and B to represent the weight change, i.e., Delta W=BA. Despite LoRA's progress, it faces storage challenges when handling extensive customization adaptations or larger base models. In this work, we aim to further compress trainable parameters by enjoying the powerful expressiveness of the Fourier transform. Specifically, we introduce FourierFT, which treats Delta W as a matrix in the spatial domain and learns only a small fraction of its spectral coefficients. With the trained spectral coefficients, we implement the inverse discrete Fourier transform to recover Delta W. Empirically, our FourierFT method shows comparable or better performance with fewer parameters than LoRA on various tasks, including natural language understanding, natural language generation, instruction tuning, and image classification. For example, when performing instruction tuning on the LLaMA2-7B model, FourierFT surpasses LoRA with only 0.064M trainable parameters, compared to LoRA's 33.5M.
|
||||
|
||||
## FourierFTConfig
|
||||
|
||||
[[autodoc]] tuners.fourierft.config.FourierFTConfig
|
||||
|
||||
## FourierFTModel
|
||||
|
||||
[[autodoc]] tuners.fourierft.model.FourierFTModel
|
17
docs/source/package_reference/helpers.md
Normal file
17
docs/source/package_reference/helpers.md
Normal file
@ -0,0 +1,17 @@
|
||||
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Helper methods
|
||||
|
||||
A collection of helper functions for PEFT.
|
||||
|
||||
## Checking if a model is a PEFT model
|
||||
|
||||
[[autodoc]] helpers.check_if_peft_model
|
||||
- all
|
||||
|
||||
## Temporarily Rescaling Adapter Scale in LoraLayer Modules
|
||||
|
||||
[[autodoc]] helpers.rescale_adapter_scale
|
||||
- all
|
34
docs/source/package_reference/layernorm_tuning.md
Normal file
34
docs/source/package_reference/layernorm_tuning.md
Normal file
@ -0,0 +1,34 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# LayerNorm Tuning
|
||||
|
||||
LayerNorm Tuning ([LN Tuning](https://huggingface.co/papers/2312.11420)) is a PEFT method that only fine-tunes the parameters of the LayerNorm layers in a model.
|
||||
The paper has tested the performance of this method on large language models and has shown that it can achieve strong performance with a significant reduction in the number of trainable parameters and GPU memory usage.
|
||||
However, the method is not limited to language models and can be applied to any model that uses LayerNorm layers.
|
||||
In this implementation, the default is that all LayerNorm layers inside a model are fine-tuned, but it can also be used to target other layer types such as `MLP` or `Attention` layers; this can be done by specifying the `target_modules` in the `LNTuningConfig`.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*This paper introduces an efficient strategy to transform Large Language Models (LLMs) into Multi-Modal Large Language Models (MLLMs). By conceptualizing this transformation as a domain adaptation process, i.e., transitioning from text understanding to embracing multiple modalities, we intriguingly note that, within each attention block, tuning LayerNorm suffices to yield strong performance. Moreover, when benchmarked against other tuning approaches like full parameter finetuning or LoRA, its benefits on efficiency are substantial. For example, when compared to LoRA on a 13B model scale, performance can be enhanced by an average of over 20% across five multi-modal tasks, and meanwhile, results in a significant reduction of trainable parameters by 41.9% and a decrease in GPU memory usage by 17.6%. On top of this LayerNorm strategy, we showcase that selectively tuning only with conversational data can improve efficiency further. Beyond these empirical outcomes, we provide a comprehensive analysis to explore the role of LayerNorm in adapting LLMs to the multi-modal domain and improving the expressive power of the model.*
|
||||
|
||||
## LNTuningConfig
|
||||
|
||||
[[autodoc]] tuners.ln_tuning.config.LNTuningConfig
|
||||
|
||||
## LNTuningModel
|
||||
|
||||
[[autodoc]] tuners.ln_tuning.model.LNTuningModel
|
@ -71,3 +71,7 @@ A `PeftModel` for mixing different adapter types (e.g. LoRA and LoHa).
|
||||
[[autodoc]] utils.get_peft_model_state_dict
|
||||
|
||||
[[autodoc]] utils.prepare_model_for_kbit_training
|
||||
|
||||
[[autodoc]] get_layer_status
|
||||
|
||||
[[autodoc]] get_model_status
|
||||
|
40
docs/source/package_reference/vblora.md
Normal file
40
docs/source/package_reference/vblora.md
Normal file
@ -0,0 +1,40 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# VB-LoRA: Extreme Parameter Efficient Fine-Tuning with Vector Banks
|
||||
|
||||
## Overview
|
||||
|
||||
[VB-LoRA](https://arxiv.org/abs/2405.15179) is a parameter-efficient fine-tuning technique that extends LoRA by learning a fine-grained parameter-sharing scheme at the sub-vector level, achieving significantly higher parameter efficiency. This makes VB-LoRA especially useful in scenarios where storage and transmission costs are critical. It works by decomposing low-rank matrices—from different layers and modules such as K, Q, V, and FFN—into sub-vectors, which are then globally shared through a vector bank.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*As the adoption of large language models increases and the need for per-user or per-task model customization grows, the parameter-efficient fine-tuning (PEFT) methods, such as low-rank adaptation (LoRA) and its variants, incur substantial storage and transmission costs. To further reduce stored parameters, we introduce a "divide-and-share" paradigm that breaks the barriers of low-rank decomposition across matrix dimensions, modules and layers by sharing parameters globally via a vector bank. As an instantiation of the paradigm to LoRA, our proposed VB-LoRA composites all the low-rank matrices of LoRA from a shared vector bank with a differentiable top-k admixture module. VB-LoRA achieves extreme parameter efficiency while maintaining comparable or better performance compared to state-of-the-art PEFT methods. Extensive experiments demonstrate the effectiveness of VB-LoRA on natural language understanding, natural language generation, and instruction tuning tasks. When fine-tuning the Llama2-13B model, VB-LoRA only uses 0.4% of LoRA's stored parameters, yet achieves superior results.*
|
||||
|
||||
## Usage Tips
|
||||
|
||||
- VB-LoRA utilizes a sparse top-k module to learn the sharing mechanism. When saving adapter parameters, you can either save only the top-k weights and their indices by setting `save_only_topk_weights = True` in `VBLoRAConfig`, or save all the trainable logits by setting it to `False`. Enabling `save_only_topk_weights = True` significantly reduces storage space; for instance, in Llama2-7B, the storage file size decreases from 308MB to 2.5MB. Note that models saved with `save_only_topk_weights = True` are intended for merging or inference only and cannot be used to resume training.
|
||||
|
||||
- VB-LoRA has two sets of training parameters: vector bank parameters and logit parameters. In practice, we found that logit parameters require a higher learning rate, while vector bank parameters require a lower learning rate. When using the AdamW optimizer, typical learning rates are 0.01 for logits and 0.001 for vector bank parameters.
|
||||
|
||||
## VBLoRAConfig
|
||||
|
||||
[[autodoc]] tuners.vblora.config.VBLoRAConfig
|
||||
|
||||
## VBLoRAModel
|
||||
|
||||
[[autodoc]] tuners.vblora.model.VBLoRAModel
|
||||
|
@ -20,9 +20,10 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
When saving the adapter parameters, it's possible to eschew storing the low rank matrices by setting `save_projection=False` on the `VeraConfig`. In that case, these matrices will be restored based on the fixed random seed from the `projection_prng_key` argument. This cuts down on the size of the checkpoint, but we cannot guarantee reproducibility on all devices and for all future versions of PyTorch. If you want to ensure reproducibility, set `save_projection=True` (which is the default).
|
||||
|
||||
To handle different shapes of adapted layers, VeRA initializes shared A and B matrices with the largest required size for each dimension. During the forward pass, submatrices A and B for a given layer are sliced out from these shared matrices and used as described in the paper. For example, adapting two linear layers of shapes (100, 20) and (80, 50) will create A and B matrices of shapes (rank, 50) and (100, rank) respectively. Then, to adapt a layer of shape (100, 20), submatrices A and B of shapes (rank, 20) and (100, rank) will be extracted.
|
||||
|
||||
VeRA currently has the following constraints:
|
||||
|
||||
- All targeted parameters must have the same shape.
|
||||
- Only `nn.Linear` layers are supported.
|
||||
- Quantized layers are not supported.
|
||||
|
||||
|
56
docs/source/package_reference/xlora.md
Normal file
56
docs/source/package_reference/xlora.md
Normal file
@ -0,0 +1,56 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# X-LoRA
|
||||
|
||||
Mixture of LoRA Experts ([X-LoRA](https://arxiv.org/abs/2402.07148)) is a PEFT method enabling sparse or dense mixture of LoRA experts based on a high granularity (token, layer, sequence) scalings matrix. This leverages frozen LoRA adapters and a frozen base model to drastically reduce the number of parameters that need to be fine-tuned.
|
||||
|
||||
A unique aspect of X-LoRA is its versatility: it can be applied to any `transformers` base model with LoRA adapters. This means that, despite the mixture of experts strategy, no changes to the model code must be made.
|
||||
|
||||
The below graphic demonstrates how the scalings change for different prompts for each token. This highlights the activation of different adapters as the generation progresses and the sequence creates new context.
|
||||
|
||||

|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*We report a mixture of expert strategy to create fine-tuned large language models using a deep layer-wise token-level approach based on low-rank adaptation (LoRA). Starting with a set of pre-trained LoRA adapters, our gating strategy uses the hidden states to dynamically mix adapted layers, allowing the resulting X-LoRA model to draw upon different capabilities and create never-before-used deep layer-wise combinations to solve tasks. The design is inspired by the biological principles of universality and diversity, where neural network building blocks are reused in different hierarchical manifestations. Hence, the X-LoRA model can be easily implemented for any existing large language model (LLM) without a need for modifications of the underlying structure. We develop a tailored X-LoRA model that offers scientific capabilities including forward/inverse analysis tasks and enhanced reasoning capability, focused on biomaterial analysis, protein mechanics and design. The impact of this work include access to readily expandable and adaptable models with strong domain knowledge and the capability to integrate across areas of knowledge. Featuring experts in biology, mathematics, reasoning, bio-inspired materials, mechanics and materials, chemistry, protein biophysics, mechanics and quantum-mechanics based molecular properties, we conduct a series of physics-focused case studies. We examine knowledge recall, protein mechanics forward/inverse tasks, protein design, adversarial agentic modeling including ontological knowledge graph construction, as well as molecular design. The model is capable not only of making quantitative predictions of nanomechanical properties of proteins or quantum mechanical molecular properties, but also reasons over the results and correctly predicts likely mechanisms that explain distinct molecular behaviors.*.
|
||||
|
||||
Please cite X-LoRA as:
|
||||
```bibtex
|
||||
@article{10.1063/5.0203126,
|
||||
author = {Buehler, Eric L. and Buehler, Markus J.},
|
||||
title = "{X-LoRA: Mixture of low-rank adapter experts, a flexible framework for large language models with applications in protein mechanics and molecular design}",
|
||||
journal = {APL Machine Learning},
|
||||
volume = {2},
|
||||
number = {2},
|
||||
pages = {026119},
|
||||
year = {2024},
|
||||
month = {05},
|
||||
abstract = "{We report a mixture of expert strategy to create fine-tuned large language models using a deep layer-wise token-level approach based on low-rank adaptation (LoRA). Starting with a set of pre-trained LoRA adapters, our gating strategy uses the hidden states to dynamically mix adapted layers, allowing the resulting X-LoRA model to draw upon different capabilities and create never-before-used deep layer-wise combinations to solve tasks. The design is inspired by the biological principles of universality and diversity, where neural network building blocks are reused in different hierarchical manifestations. Hence, the X-LoRA model can be easily implemented for any existing large language model without a need for modifications of the underlying structure. We develop a tailored X-LoRA model that offers scientific capabilities, including forward/inverse analysis tasks and enhanced reasoning capability, focused on biomaterial analysis, protein mechanics, and design. The impact of this work includes access to readily expandable and adaptable models with strong domain knowledge and the capability to integrate across areas of knowledge. Featuring experts in biology, mathematics, reasoning, bio-inspired materials, mechanics and materials, chemistry, protein biophysics, mechanics, and quantum-mechanics based molecular properties, we conduct a series of physics-focused case studies. We examine knowledge recall, protein mechanics forward/inverse tasks, protein design, adversarial agentic modeling including ontological knowledge graph construction, and molecular design. The model is capable not only of making quantitative predictions of nanomechanical properties of proteins or quantum mechanical molecular properties but also reasoning over the results and correctly predicting likely mechanisms that explain distinct molecular behaviors.}",
|
||||
issn = {2770-9019},
|
||||
doi = {10.1063/5.0203126},
|
||||
url = {https://doi.org/10.1063/5.0203126},
|
||||
eprint = {https://pubs.aip.org/aip/aml/article-pdf/doi/10.1063/5.0203126/19964043/026119\_1\_5.0203126.pdf},
|
||||
}
|
||||
```
|
||||
|
||||
## XLoraConfig
|
||||
|
||||
[[autodoc]] tuners.xlora.config.XLoraConfig
|
||||
|
||||
## XLoraModel
|
||||
|
||||
[[autodoc]] tuners.xlora.model.XLoraModel
|
@ -76,7 +76,7 @@ training_args = TrainingArguments(
|
||||
per_device_eval_batch_size=32,
|
||||
num_train_epochs=2,
|
||||
weight_decay=0.01,
|
||||
evaluation_strategy="epoch",
|
||||
eval_strategy="epoch",
|
||||
save_strategy="epoch",
|
||||
load_best_model_at_end=True,
|
||||
)
|
||||
|
@ -20,6 +20,8 @@ A popular way to efficiently train large models is to insert (typically in the a
|
||||
|
||||
There are several different ways to express the weight matrix as a low-rank decomposition, but [Low-Rank Adaptation (LoRA)](../conceptual_guides/adapter#low-rank-adaptation-lora) is the most common method. The PEFT library supports several other LoRA variants, such as [Low-Rank Hadamard Product (LoHa)](../conceptual_guides/adapter#low-rank-hadamard-product-loha), [Low-Rank Kronecker Product (LoKr)](../conceptual_guides/adapter#low-rank-kronecker-product-lokr), and [Adaptive Low-Rank Adaptation (AdaLoRA)](../conceptual_guides/adapter#adaptive-low-rank-adaptation-adalora). You can learn more about how these methods work conceptually in the [Adapters](../conceptual_guides/adapter) guide. If you're interested in applying these methods to other tasks and use cases like semantic segmentation, token classification, take a look at our [notebook collection](https://huggingface.co/collections/PEFT/notebooks-6573b28b33e5a4bf5b157fc1)!
|
||||
|
||||
Additionally, PEFT supports the [X-LoRA](../conceptual_guides/adapter#mixture-of-lora-experts-x-lora) Mixture of LoRA Experts method.
|
||||
|
||||
This guide will show you how to quickly train an image classification model - with a low-rank decomposition method - to identify the class of food shown in an image.
|
||||
|
||||
<Tip>
|
||||
@ -257,7 +259,7 @@ batch_size = 128
|
||||
args = TrainingArguments(
|
||||
peft_model_id,
|
||||
remove_unused_columns=False,
|
||||
evaluation_strategy="epoch",
|
||||
eval_strategy="epoch",
|
||||
save_strategy="epoch",
|
||||
learning_rate=5e-3,
|
||||
per_device_train_batch_size=batch_size,
|
||||
@ -307,7 +309,7 @@ Let's load the model from the Hub and test it out on a food image.
|
||||
|
||||
```py
|
||||
from peft import PeftConfig, PeftModel
|
||||
from transfomers import AutoImageProcessor
|
||||
from transformers import AutoImageProcessor
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
|
@ -90,7 +90,7 @@ def preprocess_function(examples, text_column="Tweet text", label_column="text_l
|
||||
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
|
||||
"attention_mask"
|
||||
][i]
|
||||
labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids
|
||||
labels["input_ids"][i] = [-100] * (max_length - len(label_input_ids)) + label_input_ids
|
||||
model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
|
||||
model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
|
||||
labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length])
|
||||
|
@ -99,7 +99,7 @@ You can create your own configuration for training by initializing a [`PromptEnc
|
||||
from peft import PromptEncoderConfig, TaskType
|
||||
|
||||
p_tuning_config = PromptEncoderConfig(
|
||||
encoder_reprameterization_type="MLP",
|
||||
encoder_reparameterization_type="MLP",
|
||||
encoder_hidden_size=128,
|
||||
num_attention_heads=16,
|
||||
num_layers=24,
|
||||
|
@ -37,7 +37,7 @@ from utils.unet_2d_condition import UNet2DConditionNewModel
|
||||
|
||||
|
||||
sys.path.append("../../src")
|
||||
from peft import PeftModel
|
||||
from peft import PeftModel # noqa: E402
|
||||
|
||||
|
||||
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
|
||||
|
1388
examples/causal_language_modeling/peft_ln_tuning_clm.ipynb
Normal file
1388
examples/causal_language_modeling/peft_ln_tuning_clm.ipynb
Normal file
File diff suppressed because it is too large
Load Diff
@ -168,7 +168,7 @@
|
||||
"model = AutoModelForCausalLM.from_pretrained(\n",
|
||||
" model_name,\n",
|
||||
" low_cpu_mem_usage=True\n",
|
||||
" # use_flash_attention_2=True, # leading to an error\n",
|
||||
" # attn_implementation =\"flash_attention_2\", # leading to an error\n",
|
||||
")\n",
|
||||
"model.resize_token_embeddings(len(tokenizer))"
|
||||
]
|
||||
@ -956,7 +956,7 @@
|
||||
"inference_model = AutoModelForCausalLM.from_pretrained(\n",
|
||||
" model_name,\n",
|
||||
" low_cpu_mem_usage=True,\n",
|
||||
" # use_flash_attention_2=True,\n",
|
||||
" # attn_implementation =\"flash_attention_2\",\n",
|
||||
")\n",
|
||||
"inference_model.resize_token_embeddings(len(tokenizer))\n",
|
||||
"\n",
|
||||
|
@ -558,7 +558,7 @@
|
||||
" per_device_train_batch_size=batch_size,\n",
|
||||
" learning_rate=lr,\n",
|
||||
" num_train_epochs=num_epochs,\n",
|
||||
" evaluation_strategy=\"epoch\",\n",
|
||||
" eval_strategy=\"epoch\",\n",
|
||||
" logging_strategy=\"epoch\",\n",
|
||||
" save_strategy=\"no\",\n",
|
||||
" report_to=[],\n",
|
||||
|
2858
examples/dna_language_models/dna_lm.ipynb
Normal file
2858
examples/dna_language_models/dna_lm.ipynb
Normal file
File diff suppressed because it is too large
Load Diff
8544
examples/dora_finetuning/QDoRA_finetuning.ipynb
Normal file
8544
examples/dora_finetuning/QDoRA_finetuning.ipynb
Normal file
File diff suppressed because it is too large
Load Diff
106
examples/dora_finetuning/README.md
Normal file
106
examples/dora_finetuning/README.md
Normal file
@ -0,0 +1,106 @@
|
||||
# DoRA: Weight-Decomposed Low-Rank Adaptation
|
||||
|
||||

|
||||
|
||||
|
||||
## Introduction
|
||||
[DoRA](https://arxiv.org/abs/2402.09353) is a novel approach that leverages low rank adaptation through weight decomposition analysis to investigate the inherent differences between full fine-tuning and LoRA. DoRA initially decomposes the pretrained weight into its magnitude and directional components and finetunes both of them. Because the directional component is large in terms of parameter numbers, we further decompose it with LoRA for efficient finetuning. This results in enhancing both the learning capacity and training stability of LoRA while avoiding any additional inference overhead.
|
||||
|
||||
## Quick start
|
||||
```python
|
||||
import torch
|
||||
from peft import LoraConfig, get_peft_model
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer
|
||||
from datasets import load_dataset
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", device_map="cuda")
|
||||
tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
|
||||
dataset = load_dataset("timdettmers/openassistant-guanaco", split="train")
|
||||
lora_config = LoraConfig(
|
||||
use_dora=True
|
||||
)
|
||||
peft_model = get_peft_model(model, lora_config)
|
||||
trainer = transformers.Trainer(
|
||||
model=peft_model,
|
||||
train_dataset=dataset,
|
||||
dataset_text_field="text",
|
||||
max_seq_length=2048,
|
||||
tokenizer=tokenizer,
|
||||
)
|
||||
trainer.train()
|
||||
peft_model.save_pretrained("dora-llama-3-8b")
|
||||
```
|
||||
|
||||
There is no additional change needed to your standard LoRA procedure, except for specifying `use_dora = True` option in your lora configuration.
|
||||
|
||||
|
||||
Run the finetuning script simply by running:
|
||||
```bash
|
||||
python examples/dora_finetuning/dora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --data_path timdettmers/openassistant-guanaco
|
||||
```
|
||||
This 👆🏻 by default will load the model with a standard LoRA PEFT config. If you want to quickly compare it with DoRA, all you need to do is add ` --use_dora` to the command line. The same example above then becomes 👇🏻:
|
||||
|
||||
```bash
|
||||
python examples/dora_finetuning/dora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --data_path timdettmers/openassistant-guanaco --use_dora
|
||||
```
|
||||
|
||||
DoRA also supports quantization. To use 4-bit quantization try:
|
||||
|
||||
```bash
|
||||
python examples/dora_finetuning/dora_finetuning.py --base_model meta-llama/Meta-Llama-3-8B --quantize
|
||||
```
|
||||
|
||||
Similarly, by default the LoRA layers are the attention and MLP layers of the Llama model; if you want to choose a different set of layers for LoRA to be applied to, you can simply define them using:
|
||||
```bash
|
||||
python examples/dora_finetuning/dora_finetuning.py --lora_target_modules "q_proj,k_proj,v_proj,o_proj"
|
||||
```
|
||||
|
||||
### Full example of the script
|
||||
```bash
|
||||
python dora_finetuning.py \
|
||||
--base_model "PATH_TO_MODEL" \
|
||||
--data_path "PATH_TO_DATASET" \
|
||||
--output_dir "PATH_TO_OUTPUT_DIR" \
|
||||
--batch_size 1 \
|
||||
--num_epochs 3 \
|
||||
--learning_rate 3e-4 \
|
||||
--cutoff_len 512 \
|
||||
--val_set_size 500 \
|
||||
--use_dora \
|
||||
--quantize \
|
||||
--eval_step 10 \
|
||||
--save_step 100 \
|
||||
--device "cuda:0" \
|
||||
--lora_r 16 \
|
||||
--lora_alpha 32 \
|
||||
--lora_dropout 0.05 \
|
||||
--lora_target_modules "q_proj,k_proj,v_proj,o_proj" \
|
||||
--hub_model_id "YOUR_HF_REPO" \
|
||||
--push_to_hub
|
||||
```
|
||||
## Use the model on 🤗
|
||||
You can load and use the model as any other 🤗 models.
|
||||
```python
|
||||
from transformers import AutoModel
|
||||
model = AutoModel.from_pretrained("ShirinYamani/huggyllama-llama-7b-finetuned")
|
||||
```
|
||||
|
||||
## DoRA vs. LoRA
|
||||
In general, DoRA finetuning on diffusion models is still experimental and is likely to require different hyperparameter values to perform best compared to LoRA.
|
||||
|
||||
Specifically, people have noticed 2 differences to take into account in your training:
|
||||
|
||||
1. LoRA seems to converge faster than DoRA (so a set of parameters that may lead to overfitting when training a LoRA may work well for a DoRA)
|
||||
|
||||
2. DoRA's quality is superior to LoRA's, especially at lower ranks: the difference in quality between DoRA and LoRA at rank 8 appears to be more significant than when training at ranks of 32 or 64, for example.
|
||||
|
||||
|
||||
## Citation
|
||||
```
|
||||
@article{liu2024dora,
|
||||
title={DoRA: Weight-Decomposed Low-Rank Adaptation},
|
||||
author={Liu, Shih-Yang and Wang, Chien-Yi and Yin, Hongxu and Molchanov, Pavlo and Wang, Yu-Chiang Frank and Cheng, Kwang-Ting and Chen, Min-Hung},
|
||||
journal={arXiv preprint arXiv:2402.09353},
|
||||
year={2024}
|
||||
}
|
||||
```
|
200
examples/dora_finetuning/dora_finetuning.py
Normal file
200
examples/dora_finetuning/dora_finetuning.py
Normal file
@ -0,0 +1,200 @@
|
||||
import os
|
||||
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from transformers import (
|
||||
AutoModelForCausalLM,
|
||||
AutoTokenizer,
|
||||
BitsAndBytesConfig,
|
||||
DataCollatorWithPadding,
|
||||
Trainer,
|
||||
TrainingArguments,
|
||||
)
|
||||
|
||||
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
|
||||
|
||||
|
||||
def train_model(
    base_model: str,
    data_path: str,
    output_dir: str,
    batch_size: int,
    num_epochs: int,
    learning_rate: float,
    cutoff_len: int,
    val_set_size: int,
    use_dora: bool,
    quantize: bool,
    eval_step: int,
    save_step: int,
    device: str,
    lora_r: int,
    lora_alpha: int,
    lora_dropout: float,
    lora_target_modules: str,
    hub_model_id: str,
    push_to_hub: bool,
):
    """Fine-tune a causal language model with LoRA or DoRA (optionally 4-bit quantized).

    Loads `base_model`, wraps it in a PEFT LoRA/DoRA adapter, tokenizes the dataset
    at `data_path`, trains with the Hugging Face `Trainer`, and saves (and optionally
    pushes) the resulting adapter and tokenizer.

    Args:
        base_model: Hub id or local path of the base causal LM.
        data_path: Hub id or local path of the dataset (must have "train"/"test" splits
            and a "text" column).
        output_dir: Directory where the fine-tuned model and tokenizer are saved.
        batch_size: Per-device train/eval batch size.
        num_epochs: Number of training epochs.
        learning_rate: AdamW learning rate.
        cutoff_len: Max token length used when tokenizing.
        val_set_size: Accepted for CLI compatibility but unused — the dataset's own
            "test" split is used for evaluation.
        use_dora: If True, enable DoRA (`use_dora=True` in `LoraConfig`); otherwise plain LoRA.
        quantize: If True, load the base model in 4-bit NF4 (QDoRA/QLoRA style).
        eval_step: Interval (in steps) for evaluation and logging.
        save_step: Interval (in steps) for checkpointing.
        device: Device string (e.g. "cuda:0") for the non-quantized path.
        lora_r: LoRA rank.
        lora_alpha: LoRA alpha scaling.
        lora_dropout: LoRA dropout probability.
        lora_target_modules: Comma-separated module names, or falsy for the default
            attention + MLP projection layers.
        hub_model_id: Hub repository to push to.
        push_to_hub: Whether to push the trained model to the Hub.
    """
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    hf_token = os.getenv("HF_TOKEN")

    # Setup device
    device = torch.device(device)
    print(f"Using device: {device}")

    # load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(base_model, token=hf_token)

    # QDoRA (quantized dora): IF YOU WANNA QUANTIZE THE MODEL
    if quantize:
        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            token=hf_token,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=True,
                # prefer bf16 compute when the GPU supports it, else fall back to fp16
                bnb_4bit_compute_dtype=(
                    torch.bfloat16 if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else torch.float16
                ),
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
            ),
        )
        # setup for quantized training
        model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)
    else:
        model = AutoModelForCausalLM.from_pretrained(base_model, token=hf_token)

    # LoRa config for the PEFT model
    lora_config = LoraConfig(
        use_dora=use_dora,  # to use Dora OR compare to Lora just set the --use_dora
        r=lora_r,  # Rank of matrix
        lora_alpha=lora_alpha,
        target_modules=(
            lora_target_modules.split(",")
            if lora_target_modules
            else ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
        ),
        lora_dropout=lora_dropout,
        bias="none",
    )

    # get the peft model with LoRa config
    model = get_peft_model(model, lora_config)

    # Fix: a 4-bit quantized model is already placed on the GPU by bitsandbytes and
    # must not be moved with `.to()`; only move the full-precision model.
    if not quantize:
        model.to(device)  # MODEL TO GPU/CUDA
    tokenizer.pad_token = tokenizer.eos_token

    # Load the dataset
    dataset = load_dataset(data_path)

    def tokenize_function(examples):
        inputs = tokenizer(examples["text"], padding="max_length", truncation=True, max_length=cutoff_len)
        inputs["labels"] = inputs["input_ids"].copy()  # setting labels for a language modeling task
        return inputs

    # Tokenize the dataset and prepare for training
    tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)

    # Data collator to dynamically pad the batched examples
    data_collator = DataCollatorWithPadding(tokenizer)

    # Define training arguments
    training_args = TrainingArguments(
        output_dir=output_dir,
        num_train_epochs=num_epochs,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        warmup_steps=100,
        weight_decay=0.01,
        # Fix: an eval_dataset is passed to the Trainer but no eval strategy was set,
        # so evaluation never ran; run it every `eval_step` steps as the CLI documents.
        eval_strategy="steps",
        eval_steps=eval_step,
        logging_dir="./logs",
        logging_steps=eval_step,
        save_steps=save_step,
        save_total_limit=2,
        push_to_hub=push_to_hub,
        hub_model_id=hub_model_id,
        gradient_accumulation_steps=16,
        fp16=True,
        learning_rate=learning_rate,
        hub_token=hf_token,
    )

    # Clear CUDA cache to free memory
    torch.cuda.empty_cache()

    # Initialize the Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["test"],
        data_collator=data_collator,
    )

    # Start model training
    trainer.train()

    # Save and push the trained model and tokenizer
    if push_to_hub:
        # Push the main model to the hub
        trainer.push_to_hub(commit_message="Fine-tuned model")

    # Save the model and tokenizer locally
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import argparse

    # Command-line interface for the DoRA finetuning example. Every flag's
    # dest maps 1:1 onto a train_model() keyword parameter, so the parsed
    # namespace can be forwarded wholesale at the bottom.
    parser = argparse.ArgumentParser(description="Fine-tune LLaMA with DoRA and PEFT")
    add = parser.add_argument
    add("--base_model", type=str, default="huggyllama/llama-7b", help="Base model path or name")
    add("--data_path", type=str, default="timdettmers/openassistant-guanaco", help="Dataset path or name")
    add("--output_dir", type=str, default="path/to/output", help="Output directory for the fine-tuned model")
    add("--batch_size", type=int, default=1, help="Batch size")
    add("--num_epochs", type=int, default=1, help="Number of training epochs")
    add("--learning_rate", type=float, default=3e-4, help="Learning rate")
    add("--cutoff_len", type=int, default=512, help="Cutoff length for tokenization")
    add("--val_set_size", type=int, default=500, help="Validation set size")
    add("--use_dora", action="store_true", help="Apply Dora")
    add("--quantize", action="store_true", help="Use quantization")
    add("--eval_step", type=int, default=10, help="Evaluation step interval")
    add("--save_step", type=int, default=100, help="Save step interval")
    add("--device", type=str, default="cuda:0", help="Device to use for training")
    add("--lora_r", type=int, default=8, help="LoRA rank")
    add("--lora_alpha", type=int, default=16, help="LoRA alpha")
    add("--lora_dropout", type=float, default=0.05, help="LoRA dropout rate")
    add("--lora_target_modules", type=str, default=None, help="Comma-separated list of target modules for LoRA")
    add(
        "--hub_model_id",
        type=str,
        default="path/to/repo",
        help="Repository name to push the model on the Hugging Face Hub",
    )
    add("--push_to_hub", action="store_true", help="Whether to push the model to Hugging Face Hub")
    args = parser.parse_args()

    # All argparse dests match train_model's parameter names exactly, so this
    # is equivalent to spelling out each keyword argument individually.
    train_model(**vars(args))
|
103
examples/ephemeral_gpu_offloading/load_with_dora.py
Normal file
103
examples/ephemeral_gpu_offloading/load_with_dora.py
Normal file
@ -0,0 +1,103 @@
|
||||
# Copyright 2024-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Example script demonstrating the time difference loading a model with a DoRA using ephemeral GPU offloading vs doing it purely on the CPU.
|
||||
|
||||
Example outputs:
|
||||
$ python load_with_dora.py
|
||||
--- Loading model ---
|
||||
Loading checkpoint shards: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:04<00:00, 1.03s/it]
|
||||
--- Loading PeftModel ---
|
||||
--- Done ---
|
||||
Model loading time: 4.83s
|
||||
PeftModel loading time: 28.14s
|
||||
Use ephemeral GPU offloading: False
|
||||
|
||||
(Note: if this was the first time you ran the script, or if your cache was cleared, the times shown above are invalid, due to the time taken to download the model and DoRA files. Just re-run the script in this case.)
|
||||
|
||||
$ python load_with_dora.py --ephemeral_gpu_offload
|
||||
--- Loading model ---
|
||||
Loading checkpoint shards: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:03<00:00, 1.11it/s]
|
||||
--- Loading PeftModel ---
|
||||
--- Done ---
|
||||
Model loading time: 4.28s
|
||||
PeftModel loading time: 16.59s
|
||||
Use ephemeral GPU offloading: True
|
||||
|
||||
(Note: if this was the first time you ran the script, or if your cache was cleared, the times shown above are invalid, due to the time taken to download the model and DoRA files. Just re-run the script in this case.)
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import time
|
||||
|
||||
from huggingface_hub import snapshot_download
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
from peft import PeftModel
|
||||
|
||||
|
||||
def main():
    """Time loading a DoRA adapter with vs. without ephemeral GPU offloading.

    Loads the base model on CPU, then loads the PEFT (DoRA) adapter on top of
    it, printing the elapsed wall-clock time for each stage. The
    ``--ephemeral_gpu_offload`` flag is forwarded to
    ``PeftModel.from_pretrained``. Optionally merges the adapter into the base
    model and saves the result when ``--merge_model_path`` is given.
    """
    parser = argparse.ArgumentParser(description="Load a model with DoRA using ephemeral GPU offloading")
    parser.add_argument("--model", type=str, default="NousResearch/Hermes-2-Pro-Mistral-7B", help="Model to load")
    parser.add_argument(
        "--dora",
        type=str,
        default="peft-internal-testing/DoRA-Hermes-2-Pro-Mistral-7B",
        help="DoRA to use",
    )
    parser.add_argument("--ephemeral_gpu_offload", action="store_true", help="Use ephemeral GPU offloading")
    parser.add_argument(
        # Fix: argparse's `type` must be a callable, not the string "str".
        # With type="str", supplying the option raised a TypeError at parse time.
        "--merge_model_path", type=str, help="Merge the model with the DoRA model and save to the given path"
    )
    args = parser.parse_args()

    peft_model_kwargs = {
        "ephemeral_gpu_offload": args.ephemeral_gpu_offload,
        "max_memory": {"cpu": "256GiB"},
        "device_map": {"": "cpu"},
    }

    # Predownload both repos so the timings below measure loading, not network I/O.
    try:
        snapshot_download(repo_id=args.model)
    except Exception as e:
        print(f"Failed to download model: {e}")
        # We continue anyway as this might be e.g. a local directory or something
    try:
        snapshot_download(repo_id=args.dora)
    except Exception as e:
        print(f"Failed to download DoRA: {e}")
        # We continue anyway as this might be e.g. a local directory or something

    start = time.perf_counter()
    print("--- Loading model ---")
    model = AutoModelForCausalLM.from_pretrained(args.model)
    model_time = time.perf_counter() - start
    print("--- Loading PeftModel ---")
    peft_model = PeftModel.from_pretrained(model, args.dora, **peft_model_kwargs)
    print("--- Done ---")
    # NOTE: measured from `start`, so this figure includes the base-model load
    # time as well (matches the example outputs in the module docstring).
    peft_model_time = time.perf_counter() - start

    print(f"Model loading time: {model_time:.2f}s")
    print(f"PeftModel loading time: {peft_model_time:.2f}s")
    print(f"Use ephemeral GPU offloading: {args.ephemeral_gpu_offload}")

    if args.merge_model_path is not None:
        # Fold the DoRA weights into the base model and persist the merged result.
        merged_model = peft_model.merge_and_unload(progressbar=True)
        merged_model.save_pretrained(args.merge_model_path)
# Script entry point: run the benchmark only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
@ -194,6 +194,8 @@ class AutoModelForSentenceEmbedding(nn.Module):
|
||||
try:
|
||||
return super().__getattr__(name) # defer to nn.Module's logic
|
||||
except AttributeError:
|
||||
if name == "model": # see #1892: prevent infinite recursion if class is not initialized
|
||||
raise
|
||||
return getattr(self.model, name)
|
||||
|
||||
|
||||
|
98
examples/hra_dreambooth/README.md
Normal file
98
examples/hra_dreambooth/README.md
Normal file
@ -0,0 +1,98 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# DreamBooth fine-tuning with HRA
|
||||
|
||||
This guide demonstrates how to use the Householder reflection adaptation (HRA) method to fine-tune DreamBooth with the `stabilityai/stable-diffusion-2-1` model.
|
||||
|
||||
HRA provides a new perspective connecting LoRA to OFT and achieves encouraging performance in various downstream tasks.
|
||||
HRA adapts a pre-trained model by multiplying each frozen weight matrix with a chain of r learnable Householder reflections (HRs).
|
||||
HRA can be interpreted as either an OFT adapter or an adaptive LoRA.
|
||||
Consequently, it harnesses the advantages of both strategies, reducing parameters and computation costs while penalizing the loss of pre-training knowledge.
|
||||
For further details on HRA, please consult the [original HRA paper](https://arxiv.org/abs/2405.17484).
|
||||
|
||||
In this guide we provide a Dreambooth fine-tuning script that is available in [PEFT's GitHub repo examples](https://github.com/huggingface/peft/tree/main/examples/hra_dreambooth). This implementation is adapted from [peft's boft_dreambooth](https://github.com/huggingface/peft/tree/main/examples/boft_dreambooth).
|
||||
|
||||
You can try it out and fine-tune on your custom images.
|
||||
|
||||
## Set up your environment
|
||||
|
||||
Start by cloning the PEFT repository:
|
||||
|
||||
```bash
|
||||
git clone --recursive https://github.com/huggingface/peft
|
||||
```
|
||||
|
||||
Navigate to the directory containing the training scripts for fine-tuning Dreambooth with HRA:
|
||||
|
||||
```bash
|
||||
cd peft/examples/hra_dreambooth
|
||||
```
|
||||
|
||||
Set up your environment: install PEFT, and all the required libraries. At the time of writing this guide we recommend installing PEFT from source. The following environment setup should work on A100 and H100:
|
||||
|
||||
```bash
|
||||
conda create --name peft python=3.10
|
||||
conda activate peft
|
||||
conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 pytorch-cuda=11.8 -c pytorch -c nvidia
|
||||
conda install xformers -c xformers
|
||||
pip install -r requirements.txt
|
||||
pip install git+https://github.com/huggingface/peft
|
||||
```
|
||||
|
||||
## Download the data
|
||||
|
||||
[dreambooth](https://github.com/google/dreambooth) dataset should have been automatically cloned in the following structure when running the training script.
|
||||
|
||||
```
|
||||
hra_dreambooth
|
||||
├── data
|
||||
│ └── dreambooth
|
||||
│ └── dataset
|
||||
│ ├── backpack
|
||||
│ └── backpack_dog
|
||||
│ ...
|
||||
```
|
||||
|
||||
You can also put your custom images into `hra_dreambooth/data/dreambooth/dataset`.
|
||||
|
||||
## Fine-tune Dreambooth with HRA
|
||||
|
||||
```bash
|
||||
class_idx=0
|
||||
bash ./train_dreambooth.sh $class_idx
|
||||
```
|
||||
|
||||
where the `$class_idx` corresponds to different subjects ranging from 0 to 29.
|
||||
|
||||
Launch the training script with `accelerate` and pass hyperparameters, as well as HRA-specific arguments, such as:
|
||||
|
||||
- `use_hra`: Enables HRA in the training script.
|
||||
- `hra_r`: the number of HRs (i.e., r) across different layers, expressed in `int`.
|
||||
As r increases, the number of trainable parameters increases, which generally leads to improved performance.
|
||||
However, this also results in higher memory consumption and longer computation times.
|
||||
Therefore, r is usually set to 8.
|
||||
**Note**, please set r to an even number to avoid potential issues during initialization.
|
||||
- `hra_apply_GS`: Applies Gram-Schmidt orthogonalization. Default is `false`.
|
||||
- `hra_bias`: specify if the `bias` parameters should be trained. Can be `none`, `all` or `hra_only`.
|
||||
|
||||
If you are running this script on Windows, you may need to set the `--num_dataloader_workers` to 0.
|
||||
|
||||
To learn more about DreamBooth fine-tuning with prior-preserving loss, check out the [Diffusers documentation](https://huggingface.co/docs/diffusers/training/dreambooth#finetuning-with-priorpreserving-loss).
|
||||
|
||||
## Generate images with the fine-tuned model
|
||||
|
||||
To generate images with the fine-tuned model, simply run the jupyter notebook `dreambooth_inference.ipynb` for visualization with `jupyter notebook` under `./examples/hra_dreambooth`.
|
BIN
examples/hra_dreambooth/a_purple_qwe_backpack.png
Normal file
BIN
examples/hra_dreambooth/a_purple_qwe_backpack.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 466 KiB |
221
examples/hra_dreambooth/dreambooth_inference.ipynb
Normal file
221
examples/hra_dreambooth/dreambooth_inference.ipynb
Normal file
File diff suppressed because one or more lines are too long
13
examples/hra_dreambooth/requirements.txt
Normal file
13
examples/hra_dreambooth/requirements.txt
Normal file
@ -0,0 +1,13 @@
|
||||
transformers==4.36.2
|
||||
accelerate==0.25.0
|
||||
evaluate
|
||||
tqdm
|
||||
datasets==2.16.1
|
||||
diffusers==0.17.1
|
||||
Pillow
|
||||
huggingface_hub
|
||||
safetensors
|
||||
nb_conda_kernels
|
||||
ipykernel
|
||||
ipywidgets
|
||||
wandb==0.16.1
|
609
examples/hra_dreambooth/train_dreambooth.py
Normal file
609
examples/hra_dreambooth/train_dreambooth.py
Normal file
@ -0,0 +1,609 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2024-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# The implementation is based on "Bridging The Gap between Low-rank and Orthogonal
|
||||
# Adaptation via Householder Reflection Adaptation" (https://arxiv.org/abs/2405.17484).
|
||||
|
||||
import hashlib
|
||||
import itertools
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
from contextlib import nullcontext
|
||||
from pathlib import Path
|
||||
|
||||
import datasets
|
||||
import diffusers
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
import torch.utils.checkpoint
|
||||
import transformers
|
||||
from accelerate import Accelerator
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import ProjectConfiguration, set_seed
|
||||
from diffusers import (
|
||||
AutoencoderKL,
|
||||
DDIMScheduler,
|
||||
DiffusionPipeline,
|
||||
DPMSolverMultistepScheduler,
|
||||
UNet2DConditionModel,
|
||||
)
|
||||
from diffusers.optimization import get_scheduler
|
||||
from diffusers.utils import check_min_version
|
||||
from diffusers.utils.import_utils import is_xformers_available
|
||||
from huggingface_hub import Repository
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import AutoTokenizer
|
||||
from utils.args_loader import (
|
||||
get_full_repo_name,
|
||||
import_model_class_from_model_name_or_path,
|
||||
parse_args,
|
||||
)
|
||||
from utils.dataset import DreamBoothDataset, PromptDataset, collate_fn
|
||||
from utils.tracemalloc import TorchTracemalloc, b2mb
|
||||
|
||||
from peft import HRAConfig, get_peft_model
|
||||
|
||||
|
||||
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.16.0.dev0")

# Accelerate-aware logger for this training script.
logger = get_logger(__name__)

# Projection-layer names inside the diffusion UNet that receive HRA adapters
# (passed as HRAConfig.target_modules below).
UNET_TARGET_MODULES = ["to_q", "to_v", "to_k", "query", "value", "key", "to_out.0", "add_k_proj", "add_v_proj"]
# NOTE(review): defined but apparently unused — the text-encoder HRAConfig in
# main() also passes UNET_TARGET_MODULES; verify whether it should use this list.
TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"]
|
||||
|
||||
|
||||
def save_adaptor(accelerator, step, unet, text_encoder, args):
    """Save the adapter weights for the current training step.

    Writes the unwrapped UNet to ``<output_dir>/unet/<step>`` and, when
    ``args.train_text_encoder`` is set, the unwrapped text encoder to
    ``<output_dir>/text_encoder/<step>``, using the accelerator's
    consolidated state dicts.
    """
    targets = [("unet", unet)]
    if args.train_text_encoder:
        targets.append(("text_encoder", text_encoder))
    for prefix, model in targets:
        unwrapped = accelerator.unwrap_model(model)
        unwrapped.save_pretrained(
            os.path.join(args.output_dir, f"{prefix}/{step}"),
            state_dict=accelerator.get_state_dict(model),
        )
|
||||
|
||||
|
||||
def main(args):
|
||||
validation_prompts = list(filter(None, args.validation_prompt[0].split(".")))
|
||||
|
||||
logging_dir = Path(args.output_dir, args.logging_dir)
|
||||
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
|
||||
|
||||
accelerator = Accelerator(
|
||||
gradient_accumulation_steps=args.gradient_accumulation_steps,
|
||||
mixed_precision=args.mixed_precision,
|
||||
log_with=args.report_to if args.report_to != "none" else None,
|
||||
project_dir=accelerator_project_config,
|
||||
)
|
||||
if args.report_to == "wandb":
|
||||
import wandb
|
||||
|
||||
args.wandb_project_name = args.project_name
|
||||
args.wandb_run_name = args.run_name
|
||||
wandb_init = {
|
||||
"wandb": {
|
||||
"name": args.wandb_run_name,
|
||||
"mode": "online",
|
||||
}
|
||||
}
|
||||
|
||||
# Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
|
||||
# This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
|
||||
# TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
|
||||
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
|
||||
raise ValueError(
|
||||
"Gradient accumulation is not supported when training the text encoder in distributed training. "
|
||||
"Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
|
||||
)
|
||||
|
||||
# Make one log on every process with the configuration for debugging.
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
||||
datefmt="%m/%d/%Y %H:%M:%S",
|
||||
level=logging.INFO,
|
||||
)
|
||||
logger.info(accelerator.state, main_process_only=False)
|
||||
if accelerator.is_local_main_process:
|
||||
datasets.utils.logging.set_verbosity_warning()
|
||||
transformers.utils.logging.set_verbosity_warning()
|
||||
diffusers.utils.logging.set_verbosity_info()
|
||||
else:
|
||||
datasets.utils.logging.set_verbosity_error()
|
||||
transformers.utils.logging.set_verbosity_error()
|
||||
diffusers.utils.logging.set_verbosity_error()
|
||||
|
||||
# If passed along, set the training seed now.
|
||||
global_seed = hash(args.run_name) % (2**32)
|
||||
set_seed(global_seed)
|
||||
|
||||
# Generate class images if prior preservation is enabled.
|
||||
if args.with_prior_preservation:
|
||||
class_images_dir = Path(args.class_data_dir)
|
||||
if not class_images_dir.exists():
|
||||
class_images_dir.mkdir(parents=True)
|
||||
cur_class_images = len(list(class_images_dir.iterdir()))
|
||||
|
||||
if cur_class_images < args.num_class_images:
|
||||
torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
|
||||
if args.prior_generation_precision == "fp32":
|
||||
torch_dtype = torch.float32
|
||||
elif args.prior_generation_precision == "fp16":
|
||||
torch_dtype = torch.float16
|
||||
elif args.prior_generation_precision == "bf16":
|
||||
torch_dtype = torch.bfloat16
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
args.pretrained_model_name_or_path,
|
||||
torch_dtype=torch_dtype,
|
||||
safety_checker=None,
|
||||
revision=args.revision,
|
||||
)
|
||||
pipeline.set_progress_bar_config(disable=True)
|
||||
|
||||
num_new_images = args.num_class_images - cur_class_images
|
||||
logger.info(f"Number of class images to sample: {num_new_images}.")
|
||||
|
||||
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
|
||||
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
|
||||
|
||||
sample_dataloader = accelerator.prepare(sample_dataloader)
|
||||
pipeline.to(accelerator.device)
|
||||
|
||||
for example in tqdm(
|
||||
sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
|
||||
):
|
||||
images = pipeline(example["prompt"]).images
|
||||
|
||||
for i, image in enumerate(images):
|
||||
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
|
||||
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
|
||||
image.save(image_filename)
|
||||
|
||||
del pipeline
|
||||
if torch.cuda.is_available():
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
# Handle the repository creation
|
||||
if accelerator.is_main_process:
|
||||
if args.push_to_hub:
|
||||
if args.hub_model_id is None:
|
||||
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
|
||||
else:
|
||||
repo_name = args.hub_model_id
|
||||
repo = Repository(args.output_dir, clone_from=repo_name) # noqa: F841
|
||||
|
||||
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
|
||||
if "step_*" not in gitignore:
|
||||
gitignore.write("step_*\n")
|
||||
if "epoch_*" not in gitignore:
|
||||
gitignore.write("epoch_*\n")
|
||||
elif args.output_dir is not None:
|
||||
os.makedirs(args.output_dir, exist_ok=True)
|
||||
|
||||
# Load the tokenizer
|
||||
if args.tokenizer_name:
|
||||
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
|
||||
elif args.pretrained_model_name_or_path:
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
args.pretrained_model_name_or_path,
|
||||
subfolder="tokenizer",
|
||||
revision=args.revision,
|
||||
use_fast=False,
|
||||
)
|
||||
|
||||
# import correct text encoder class
|
||||
text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
|
||||
|
||||
# Load scheduler and models
|
||||
noise_scheduler = DDIMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
|
||||
|
||||
text_encoder = text_encoder_cls.from_pretrained(
|
||||
args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
|
||||
)
|
||||
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
|
||||
unet = UNet2DConditionModel.from_pretrained(
|
||||
args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
|
||||
)
|
||||
|
||||
if args.use_hra:
|
||||
config = HRAConfig(
|
||||
r=args.hra_r,
|
||||
apply_GS=args.hra_apply_GS,
|
||||
target_modules=UNET_TARGET_MODULES,
|
||||
bias=args.hra_bias,
|
||||
)
|
||||
unet = get_peft_model(unet, config, adapter_name=args.run_name)
|
||||
unet.print_trainable_parameters()
|
||||
|
||||
vae.requires_grad_(False)
|
||||
unet.train()
|
||||
|
||||
if args.train_text_encoder and args.use_hra:
|
||||
config = HRAConfig(
|
||||
r=args.hra_r,
|
||||
apply_GS=args.hra_apply_GS,
|
||||
target_modules=UNET_TARGET_MODULES,
|
||||
bias=args.hra_bias,
|
||||
)
|
||||
text_encoder = get_peft_model(text_encoder, config, adapter_name=args.run_name)
|
||||
text_encoder.print_trainable_parameters()
|
||||
text_encoder.train()
|
||||
else:
|
||||
text_encoder.requires_grad_(False)
|
||||
|
||||
# For mixed precision training we cast the text_encoder and vae weights to half-precision
|
||||
# as these models are only used for inference, keeping weights in full precision is not required.
|
||||
weight_dtype = torch.float32
|
||||
if accelerator.mixed_precision == "fp16":
|
||||
weight_dtype = torch.float16
|
||||
elif accelerator.mixed_precision == "bf16":
|
||||
weight_dtype = torch.bfloat16
|
||||
|
||||
# Move unet, vae and text_encoder to device and cast to weight_dtype
|
||||
unet.to(accelerator.device, dtype=weight_dtype)
|
||||
vae.to(accelerator.device, dtype=weight_dtype)
|
||||
text_encoder.to(accelerator.device, dtype=weight_dtype)
|
||||
|
||||
if args.enable_xformers_memory_efficient_attention:
|
||||
if is_xformers_available():
|
||||
unet.enable_xformers_memory_efficient_attention()
|
||||
else:
|
||||
raise ValueError("xformers is not available. Make sure it is installed correctly")
|
||||
|
||||
if args.gradient_checkpointing:
|
||||
unet.enable_gradient_checkpointing()
|
||||
# below fails when using hra so commenting it out
|
||||
if args.train_text_encoder and not args.use_hra:
|
||||
text_encoder.gradient_checkpointing_enable()
|
||||
|
||||
# Enable TF32 for faster training on Ampere GPUs,
|
||||
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
|
||||
if args.allow_tf32:
|
||||
torch.backends.cuda.matmul.allow_tf32 = True
|
||||
|
||||
if args.scale_lr:
|
||||
args.learning_rate = (
|
||||
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
|
||||
)
|
||||
|
||||
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
|
||||
if args.use_8bit_adam:
|
||||
try:
|
||||
import bitsandbytes as bnb
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
|
||||
)
|
||||
|
||||
optimizer_class = bnb.optim.AdamW8bit
|
||||
else:
|
||||
optimizer_class = torch.optim.AdamW
|
||||
|
||||
# Optimizer creation
|
||||
params_to_optimize = [param for param in unet.parameters() if param.requires_grad]
|
||||
|
||||
if args.train_text_encoder:
|
||||
params_to_optimize += [param for param in text_encoder.parameters() if param.requires_grad]
|
||||
|
||||
optimizer = optimizer_class(
|
||||
params_to_optimize,
|
||||
lr=args.learning_rate,
|
||||
betas=(args.adam_beta1, args.adam_beta2),
|
||||
weight_decay=args.adam_weight_decay,
|
||||
eps=args.adam_epsilon,
|
||||
)
|
||||
|
||||
# Download the official dreambooth dataset from the official repository: https://github.com/google/dreambooth.git
|
||||
data_path = os.path.join(os.getcwd(), "data", "dreambooth")
|
||||
if not os.path.exists(data_path):
|
||||
os.makedirs(os.path.join(os.getcwd(), "data"), exist_ok=True)
|
||||
os.system(f"git clone https://github.com/google/dreambooth.git '{data_path}'")
|
||||
|
||||
# Dataset and DataLoaders creation:
|
||||
train_dataset = DreamBoothDataset(
|
||||
instance_data_root=args.instance_data_dir,
|
||||
instance_prompt=args.instance_prompt,
|
||||
class_data_root=args.class_data_dir if args.with_prior_preservation else None,
|
||||
class_prompt=args.class_prompt,
|
||||
tokenizer=tokenizer,
|
||||
size=args.resolution,
|
||||
center_crop=args.center_crop,
|
||||
)
|
||||
|
||||
train_dataloader = torch.utils.data.DataLoader(
|
||||
train_dataset,
|
||||
batch_size=args.train_batch_size,
|
||||
shuffle=True,
|
||||
collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
|
||||
num_workers=args.num_dataloader_workers,
|
||||
)
|
||||
|
||||
# Scheduler and math around the number of training steps.
|
||||
overrode_max_train_steps = False
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
||||
if args.max_train_steps is None:
|
||||
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
||||
overrode_max_train_steps = True
|
||||
|
||||
lr_scheduler = get_scheduler(
|
||||
args.lr_scheduler,
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
|
||||
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
|
||||
num_cycles=args.lr_num_cycles,
|
||||
power=args.lr_power,
|
||||
)
|
||||
|
||||
# Prepare everything with our `accelerator`.
|
||||
if args.train_text_encoder:
|
||||
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
|
||||
)
|
||||
else:
|
||||
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
|
||||
unet, optimizer, train_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
# For mixed precision training we cast the text_encoder and vae weights to half-precision
|
||||
# as these models are only used for inference, keeping weights in full precision is not required.
|
||||
weight_dtype = torch.float32
|
||||
if accelerator.mixed_precision == "fp16":
|
||||
weight_dtype = torch.float16
|
||||
elif accelerator.mixed_precision == "bf16":
|
||||
weight_dtype = torch.bfloat16
|
||||
|
||||
# Move vae and text_encoder to device and cast to weight_dtype
|
||||
vae.to(accelerator.device, dtype=weight_dtype)
|
||||
if not args.train_text_encoder:
|
||||
text_encoder.to(accelerator.device, dtype=weight_dtype)
|
||||
|
||||
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
||||
if overrode_max_train_steps:
|
||||
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
||||
# Afterwards we recalculate our number of training epochs
|
||||
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
|
||||
|
||||
# We need to initialize the trackers we use, and also store our configuration.
|
||||
# The trackers initializes automatically on the main process.
|
||||
if accelerator.is_main_process:
|
||||
if args.report_to == "wandb":
|
||||
accelerator.init_trackers(args.wandb_project_name, config=vars(args), init_kwargs=wandb_init)
|
||||
else:
|
||||
accelerator.init_trackers(args.project_name, config=vars(args))
|
||||
|
||||
# Train!
|
||||
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
|
||||
|
||||
logger.info("***** Running training *****")
|
||||
logger.info(f" Num examples = {len(train_dataset)}")
|
||||
logger.info(f" Num batches each epoch = {len(train_dataloader)}")
|
||||
logger.info(f" Num Epochs = {args.num_train_epochs}")
|
||||
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
|
||||
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
|
||||
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
|
||||
logger.info(f" Total optimization steps = {args.max_train_steps}")
|
||||
global_step = 0
|
||||
first_epoch = 0
|
||||
|
||||
# Potentially load in the weights and states from a previous save
|
||||
if args.resume_from_checkpoint:
|
||||
if args.resume_from_checkpoint != "latest":
|
||||
path = os.path.basename(args.resume_from_checkpoint)
|
||||
else:
|
||||
# Get the most recent checkpoint
|
||||
dirs = os.listdir(args.output_dir)
|
||||
dirs = [d for d in dirs if d.startswith("checkpoint")]
|
||||
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
|
||||
path = dirs[-1] if len(dirs) > 0 else None
|
||||
accelerator.print(f"Resuming from checkpoint {path}")
|
||||
accelerator.load_state(os.path.join(args.output_dir, path))
|
||||
global_step = int(path.split("-")[1])
|
||||
|
||||
resume_global_step = global_step * args.gradient_accumulation_steps
|
||||
first_epoch = resume_global_step // num_update_steps_per_epoch
|
||||
resume_step = resume_global_step % num_update_steps_per_epoch
|
||||
|
||||
# Only show the progress bar once on each machine.
|
||||
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
|
||||
progress_bar.set_description("Steps")
|
||||
|
||||
if args.train_text_encoder:
|
||||
text_encoder.train()
|
||||
|
||||
for epoch in range(first_epoch, args.num_train_epochs):
|
||||
unet.train()
|
||||
|
||||
with TorchTracemalloc() if not args.no_tracemalloc else nullcontext() as tracemalloc:
|
||||
for step, batch in enumerate(train_dataloader):
|
||||
# Skip steps until we reach the resumed step
|
||||
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
|
||||
if step % args.gradient_accumulation_steps == 0:
|
||||
progress_bar.update(1)
|
||||
if args.report_to == "wandb":
|
||||
accelerator.print(progress_bar)
|
||||
continue
|
||||
|
||||
with accelerator.accumulate(unet):
|
||||
# Convert images to latent space
|
||||
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
|
||||
latents = latents * vae.config.scaling_factor
|
||||
|
||||
# Sample noise that we'll add to the latents
|
||||
noise = torch.randn_like(latents)
|
||||
bsz = latents.shape[0]
|
||||
# Sample a random timestep for each image
|
||||
timesteps = torch.randint(
|
||||
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
|
||||
)
|
||||
timesteps = timesteps.long()
|
||||
|
||||
# Add noise to the latents according to the noise magnitude at each timestep
|
||||
# (this is the forward diffusion process)
|
||||
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
|
||||
|
||||
# Get the text embedding for conditioning
|
||||
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
|
||||
|
||||
# Predict the noise residual
|
||||
model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
|
||||
|
||||
# Get the target for loss depending on the prediction type
|
||||
if noise_scheduler.config.prediction_type == "epsilon":
|
||||
target = noise
|
||||
elif noise_scheduler.config.prediction_type == "v_prediction":
|
||||
target = noise_scheduler.get_velocity(latents, noise, timesteps)
|
||||
else:
|
||||
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
|
||||
|
||||
if args.with_prior_preservation:
|
||||
# Chunk the noise and model_pred into two parts and compute the loss on each part separately.
|
||||
model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
|
||||
target, target_prior = torch.chunk(target, 2, dim=0)
|
||||
|
||||
# Compute instance loss
|
||||
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
|
||||
|
||||
# Compute prior loss
|
||||
prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
|
||||
|
||||
# Add the prior loss to the instance loss.
|
||||
loss = loss + args.prior_loss_weight * prior_loss
|
||||
else:
|
||||
loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
|
||||
|
||||
accelerator.backward(loss)
|
||||
|
||||
if accelerator.sync_gradients:
|
||||
params_to_clip = (
|
||||
itertools.chain(unet.parameters(), text_encoder.parameters())
|
||||
if args.train_text_encoder
|
||||
else unet.parameters()
|
||||
)
|
||||
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
|
||||
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
# Checks if the accelerator has performed an optimization step behind the scenes
|
||||
if accelerator.sync_gradients:
|
||||
progress_bar.update(1)
|
||||
if args.report_to == "wandb":
|
||||
accelerator.print(progress_bar)
|
||||
global_step += 1
|
||||
|
||||
if global_step % args.checkpointing_steps == 0 and global_step != 0:
|
||||
if accelerator.is_main_process:
|
||||
save_adaptor(accelerator, global_step, unet, text_encoder, args)
|
||||
|
||||
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
|
||||
progress_bar.set_postfix(**logs)
|
||||
accelerator.log(logs, step=global_step)
|
||||
|
||||
if (
|
||||
args.validation_prompt is not None
|
||||
and (step + num_update_steps_per_epoch * epoch) % args.validation_steps == 0
|
||||
and global_step > 10
|
||||
):
|
||||
unet.eval()
|
||||
|
||||
logger.info(
|
||||
f"Running validation... \n Generating {len(validation_prompts)} images with prompt:"
|
||||
f" {validation_prompts[0]}, ......"
|
||||
)
|
||||
# create pipeline
|
||||
pipeline = DiffusionPipeline.from_pretrained(
|
||||
args.pretrained_model_name_or_path,
|
||||
safety_checker=None,
|
||||
revision=args.revision,
|
||||
)
|
||||
# set `keep_fp32_wrapper` to True because we do not want to remove
|
||||
# mixed precision hooks while we are still training
|
||||
pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True)
|
||||
pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True)
|
||||
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
|
||||
pipeline = pipeline.to(accelerator.device)
|
||||
pipeline.set_progress_bar_config(disable=True)
|
||||
|
||||
# run inference
|
||||
if args.seed is not None:
|
||||
generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
|
||||
else:
|
||||
generator = None
|
||||
|
||||
images = []
|
||||
val_img_dir = os.path.join(
|
||||
args.output_dir,
|
||||
f"validation/{global_step}",
|
||||
args.run_name,
|
||||
)
|
||||
os.makedirs(val_img_dir, exist_ok=True)
|
||||
|
||||
for val_promot in validation_prompts:
|
||||
image = pipeline(val_promot, num_inference_steps=50, generator=generator).images[0]
|
||||
image.save(os.path.join(val_img_dir, f"{'_'.join(val_promot.split(' '))}.png"[1:]))
|
||||
images.append(image)
|
||||
|
||||
for tracker in accelerator.trackers:
|
||||
if tracker.name == "tensorboard":
|
||||
np_images = np.stack([np.asarray(img) for img in images])
|
||||
tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
|
||||
if tracker.name == "wandb":
|
||||
import wandb
|
||||
|
||||
tracker.log(
|
||||
{
|
||||
"validation": [
|
||||
wandb.Image(image, caption=f"{i}: {validation_prompts[i]}")
|
||||
for i, image in enumerate(images)
|
||||
]
|
||||
}
|
||||
)
|
||||
|
||||
del pipeline
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
if global_step >= args.max_train_steps:
|
||||
break
|
||||
|
||||
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
|
||||
if not args.no_tracemalloc:
|
||||
accelerator.print(f"GPU Memory before entering the train : {b2mb(tracemalloc.begin)}")
|
||||
accelerator.print(f"GPU Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
|
||||
accelerator.print(f"GPU Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
|
||||
accelerator.print(
|
||||
f"GPU Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
|
||||
)
|
||||
|
||||
accelerator.print(f"CPU Memory before entering the train : {b2mb(tracemalloc.cpu_begin)}")
|
||||
accelerator.print(f"CPU Memory consumed at the end of the train (end-begin): {tracemalloc.cpu_used}")
|
||||
accelerator.print(f"CPU Peak Memory consumed during the train (max-begin): {tracemalloc.cpu_peaked}")
|
||||
accelerator.print(
|
||||
f"CPU Total Peak Memory consumed during the train (max): {tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)}"
|
||||
)
|
||||
|
||||
if args.push_to_hub:
|
||||
repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run the training loop.
    main(parse_args())
|
185
examples/hra_dreambooth/train_dreambooth.sh
Normal file
185
examples/hra_dreambooth/train_dreambooth.sh
Normal file
@ -0,0 +1,185 @@
|
||||
|
||||
#!/bin/bash
# Fine-tune Stable Diffusion 2.1 with HRA via DreamBooth on one subject of the
# official DreamBooth dataset (https://github.com/google/dreambooth).
#
# Usage: bash train_dreambooth.sh <CLASS_IDX>
#   CLASS_IDX: integer in [0, 29] indexing SUBJECT_NAMES / CLASS_TOKENs below.
# Requires bash (arrays and [[ =~ ]] are bashisms).

CLASS_IDX=$1

# Rare token used to bind the subject identity inside the instance prompts.
UNIQUE_TOKEN="qwe"

# The 30 subjects of the DreamBooth dataset (dataset directory names).
SUBJECT_NAMES=(
    "backpack" "backpack_dog" "bear_plushie" "berry_bowl" "can"
    "candle" "cat" "cat2" "clock" "colorful_sneaker"
    "dog" "dog2" "dog3" "dog5" "dog6"
    "dog7" "dog8" "duck_toy" "fancy_boot" "grey_sloth_plushie"
    "monster_toy" "pink_sunglasses" "poop_emoji" "rc_car" "red_cartoon"
    "robot_toy" "shiny_sneaker" "teapot" "vase" "wolf_plushie"
)

# Coarse class noun for each subject (same order as SUBJECT_NAMES).
CLASS_TOKENs=(
    "backpack" "backpack" "stuffed animal" "bowl" "can"
    "candle" "cat" "cat" "clock" "sneaker"
    "dog" "dog" "dog" "dog" "dog"
    "dog" "dog" "toy" "boot" "stuffed animal"
    "toy" "glasses" "toy" "toy" "cartoon"
    "toy" "sneaker" "teapot" "vase" "stuffed animal"
)

CLASS_TOKEN=${CLASS_TOKENs[$CLASS_IDX]}
SELECTED_SUBJECT=${SUBJECT_NAMES[$CLASS_IDX]}

# Inanimate subjects (the listed indices) get scene/appearance prompts; the
# remaining (live) subjects get outfit/accessory prompts instead.
if [[ $CLASS_IDX =~ ^(0|1|2|3|4|5|8|9|17|18|19|20|21|22|23|24|25|26|27|28|29)$ ]]; then
    PROMPT_LIST=(
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the jungle."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the snow."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on the beach."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on a cobblestone street."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of pink fabric."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a wooden floor."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a city in the background."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a mountain in the background."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a blue house in the background."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a purple rug in a forest."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a wheat field in the background."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a tree and autumn leaves in the background."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with the Eiffel Tower in the background."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} floating on top of water."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} floating in an ocean of milk."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of green grass with sunflowers around it."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a mirror."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of the sidewalk in a crowded street."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a dirt road."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a white rug."
        "a red ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
        "a purple ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
        "a shiny ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
        "a wet ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
        "a cube shaped ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
    )

    # Evaluation prompts without the unique token; not used by this launcher
    # itself but kept for downstream evaluation scripts that source this file.
    prompt_test_list=(
        "a ${CLASS_TOKEN} in the jungle"
        "a ${CLASS_TOKEN} in the snow"
        "a ${CLASS_TOKEN} on the beach"
        "a ${CLASS_TOKEN} on a cobblestone street"
        "a ${CLASS_TOKEN} on top of pink fabric"
        "a ${CLASS_TOKEN} on top of a wooden floor"
        "a ${CLASS_TOKEN} with a city in the background"
        "a ${CLASS_TOKEN} with a mountain in the background"
        "a ${CLASS_TOKEN} with a blue house in the background"
        "a ${CLASS_TOKEN} on top of a purple rug in a forest"
        "a ${CLASS_TOKEN} with a wheat field in the background"
        "a ${CLASS_TOKEN} with a tree and autumn leaves in the background"
        "a ${CLASS_TOKEN} with the Eiffel Tower in the background"
        "a ${CLASS_TOKEN} floating on top of water"
        "a ${CLASS_TOKEN} floating in an ocean of milk"
        "a ${CLASS_TOKEN} on top of green grass with sunflowers around it"
        "a ${CLASS_TOKEN} on top of a mirror"
        "a ${CLASS_TOKEN} on top of the sidewalk in a crowded street"
        "a ${CLASS_TOKEN} on top of a dirt road"
        "a ${CLASS_TOKEN} on top of a white rug"
        "a red ${CLASS_TOKEN}"
        "a purple ${CLASS_TOKEN}"
        "a shiny ${CLASS_TOKEN}"
        "a wet ${CLASS_TOKEN}"
        "a cube shaped ${CLASS_TOKEN}"
    )

else
    PROMPT_LIST=(
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the jungle."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the snow."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on the beach."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on a cobblestone street."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of pink fabric."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a wooden floor."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a city in the background."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a mountain in the background."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a blue house in the background."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a purple rug in a forest."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a red hat."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a santa hat."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a rainbow scarf."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a black top hat and a monocle."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a chef outfit."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a firefighter outfit."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a police outfit."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing pink glasses."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a yellow shirt."
        "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a purple wizard outfit."
        "a red ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
        "a purple ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
        "a shiny ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
        "a wet ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
        "a cube shaped ${UNIQUE_TOKEN} ${CLASS_TOKEN}."
    )

    # Evaluation prompts without the unique token; kept for downstream
    # evaluation scripts that source this file.
    prompt_test_list=(
        "a ${CLASS_TOKEN} in the jungle"
        "a ${CLASS_TOKEN} in the snow"
        "a ${CLASS_TOKEN} on the beach"
        "a ${CLASS_TOKEN} on a cobblestone street"
        "a ${CLASS_TOKEN} on top of pink fabric"
        "a ${CLASS_TOKEN} on top of a wooden floor"
        "a ${CLASS_TOKEN} with a city in the background"
        "a ${CLASS_TOKEN} with a mountain in the background"
        "a ${CLASS_TOKEN} with a blue house in the background"
        "a ${CLASS_TOKEN} on top of a purple rug in a forest"
        "a ${CLASS_TOKEN} wearing a red hat"
        "a ${CLASS_TOKEN} wearing a santa hat"
        "a ${CLASS_TOKEN} wearing a rainbow scarf"
        "a ${CLASS_TOKEN} wearing a black top hat and a monocle"
        "a ${CLASS_TOKEN} in a chef outfit"
        "a ${CLASS_TOKEN} in a firefighter outfit"
        "a ${CLASS_TOKEN} in a police outfit"
        "a ${CLASS_TOKEN} wearing pink glasses"
        "a ${CLASS_TOKEN} wearing a yellow shirt"
        "a ${CLASS_TOKEN} in a purple wizard outfit"
        "a red ${CLASS_TOKEN}"
        "a purple ${CLASS_TOKEN}"
        "a shiny ${CLASS_TOKEN}"
        "a wet ${CLASS_TOKEN}"
        "a cube shaped ${CLASS_TOKEN}"
    )
fi

# NOTE: assigning the array expansion to a scalar joins all prompts into one
# space-separated string, so the training script receives them as a single
# CLI argument (matching the original behavior).
VALIDATION_PROMPT=${PROMPT_LIST[@]}
INSTANCE_PROMPT="a photo of ${UNIQUE_TOKEN} ${CLASS_TOKEN}"
CLASS_PROMPT="a photo of ${CLASS_TOKEN}"

export MODEL_NAME="stabilityai/stable-diffusion-2-1"

PEFT_TYPE="hra"
HRA_R=8

export PROJECT_NAME="dreambooth_${PEFT_TYPE}"
export RUN_NAME="${SELECTED_SUBJECT}_${PEFT_TYPE}_${HRA_R}"
export INSTANCE_DIR="./data/dreambooth/dataset/${SELECTED_SUBJECT}"
export CLASS_DIR="./data/class_data/${CLASS_TOKEN}"
export OUTPUT_DIR="./data/output/${PEFT_TYPE}"


# All expansions are quoted: some CLASS_TOKEN values ("stuffed animal")
# contain spaces. The last option no longer carries a dangling continuation
# backslash.
accelerate launch train_dreambooth.py \
    --pretrained_model_name_or_path="$MODEL_NAME" \
    --instance_data_dir="$INSTANCE_DIR" \
    --class_data_dir="$CLASS_DIR" \
    --output_dir="$OUTPUT_DIR" \
    --project_name="$PROJECT_NAME" \
    --run_name="$RUN_NAME" \
    --with_prior_preservation \
    --prior_loss_weight=1.0 \
    --instance_prompt="$INSTANCE_PROMPT" \
    --validation_prompt="$VALIDATION_PROMPT" \
    --class_prompt="$CLASS_PROMPT" \
    --resolution=512 \
    --train_batch_size=1 \
    --num_dataloader_workers=2 \
    --lr_scheduler="constant" \
    --lr_warmup_steps=0 \
    --num_class_images=200 \
    --use_hra \
    --hra_r="$HRA_R" \
    --hra_bias="hra_only" \
    --learning_rate=5e-3 \
    --max_train_steps=510 \
    --checkpointing_steps=200 \
    --validation_steps=200 \
    --enable_xformers_memory_efficient_attention \
    --report_to="none"
|
0
examples/hra_dreambooth/utils/__init__.py
Normal file
0
examples/hra_dreambooth/utils/__init__.py
Normal file
377
examples/hra_dreambooth/utils/args_loader.py
Normal file
377
examples/hra_dreambooth/utils/args_loader.py
Normal file
@ -0,0 +1,377 @@
|
||||
# adapted from [peft's boft_dreambooth](https://github.com/huggingface/peft/tree/main/examples/boft_dreambooth)
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import warnings
|
||||
from typing import Optional
|
||||
|
||||
from huggingface_hub import HfFolder, whoami
|
||||
from transformers import PretrainedConfig
|
||||
|
||||
|
||||
def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
    """Resolve the text-encoder class used by a diffusers checkpoint.

    Loads the ``text_encoder`` sub-config of the checkpoint and lazily imports
    the matching model class based on the first declared architecture.

    Args:
        pretrained_model_name_or_path: Hub model id or local checkpoint path.
        revision: Model revision (branch, tag, or commit hash) to load.

    Returns:
        The model class (``CLIPTextModel`` or
        ``RobertaSeriesModelWithTransformation``).

    Raises:
        ValueError: If the checkpoint declares an unsupported architecture.
    """
    encoder_config = PretrainedConfig.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="text_encoder",
        revision=revision,
    )
    architecture = encoder_config.architectures[0]

    # Import lazily so only the required backend module is loaded.
    if architecture == "CLIPTextModel":
        from transformers import CLIPTextModel

        return CLIPTextModel
    if architecture == "RobertaSeriesModelWithTransformation":
        from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation

        return RobertaSeriesModelWithTransformation
    raise ValueError(f"{architecture} is not supported.")
|
||||
|
||||
|
||||
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Build the fully-qualified Hub repo name for *model_id*.

    When *organization* is given, the repo lives under that organization;
    otherwise the namespace is the username associated with *token* (falling
    back to the locally stored Hub token when *token* is None).
    """
    # Resolve the token up front so the stored-token lookup happens exactly
    # as in the original control flow, regardless of the organization branch.
    resolved_token = HfFolder.get_token() if token is None else token
    if organization is not None:
        return f"{organization}/{model_id}"
    return f"{whoami(resolved_token)['name']}/{model_id}"
|
||||
|
||||
|
||||
def parse_args(input_args=None):
|
||||
parser = argparse.ArgumentParser(description="Simple example of a Dreambooth training script.")
|
||||
parser.add_argument(
|
||||
"--pretrained_model_name_or_path",
|
||||
type=str,
|
||||
default=None,
|
||||
required=True,
|
||||
help="Path to pretrained model or model identifier from huggingface.co/models.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--revision",
|
||||
type=str,
|
||||
default=None,
|
||||
required=False,
|
||||
help="Revision of pretrained model identifier from huggingface.co/models.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--tokenizer_name",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Pretrained tokenizer name or path if not the same as model_name",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--instance_data_dir",
|
||||
type=str,
|
||||
default=None,
|
||||
required=True,
|
||||
help="A folder containing the training data of instance images.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--class_data_dir",
|
||||
type=str,
|
||||
default=None,
|
||||
required=False,
|
||||
help="A folder containing the training data of class images.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--instance_prompt",
|
||||
type=str,
|
||||
default=None,
|
||||
required=True,
|
||||
help="The prompt with identifier specifying the instance",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--class_prompt",
|
||||
type=str,
|
||||
default=None,
|
||||
help="The prompt to specify images in the same class as provided instance images.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--with_prior_preservation",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Flag to add prior preservation loss.",
|
||||
)
|
||||
parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
|
||||
parser.add_argument(
|
||||
"--num_class_images",
|
||||
type=int,
|
||||
default=100,
|
||||
help=(
|
||||
"Minimal class images for prior preservation loss. If there are not enough images already present in"
|
||||
" class_data_dir, additional images will be sampled with class_prompt."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--validation_prompt",
|
||||
nargs="+",
|
||||
help="A prompt that is used during validation to verify that the model is learning.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--num_validation_images",
|
||||
type=int,
|
||||
default=4,
|
||||
help="Number of images that should be generated during validation with `validation_prompt`.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--validation_steps",
|
||||
type=int,
|
||||
default=500,
|
||||
help=(
|
||||
"Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt"
|
||||
" `args.validation_prompt` multiple times: `args.num_validation_images`."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--output_dir",
|
||||
type=str,
|
||||
default="text-inversion-model",
|
||||
help="The output directory where the model predictions and checkpoints will be written.",
|
||||
)
|
||||
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
|
||||
parser.add_argument(
|
||||
"--resolution",
|
||||
type=int,
|
||||
default=512,
|
||||
help=(
|
||||
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
|
||||
" resolution"
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
|
||||
)
|
||||
parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
|
||||
|
||||
parser.add_argument(
|
||||
"--set_grads_to_none",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain"
|
||||
" behaviors, so disable this argument if it causes any problems. More info:"
|
||||
" https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
|
||||
),
|
||||
)
|
||||
|
||||
# hra args
|
||||
parser.add_argument("--use_hra", action="store_true", help="Whether to use HRA for parameter efficient tuning.")
|
||||
parser.add_argument("--hra_r", type=int, default=8, help="The rank of HRA across different layers.")
|
||||
parser.add_argument(
|
||||
"--hra_apply_GS", default=False, action="store_true", help="Whether to apply Gram-Schmidt orthogonalization."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--hra_bias",
|
||||
type=str,
|
||||
default="none",
|
||||
help="Bias type for HRA. Can be 'none', 'all' or 'hra_only', only used if use_hra is True.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no_tracemalloc",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
|
||||
)
|
||||
parser.add_argument("--num_train_epochs", type=int, default=1)
|
||||
parser.add_argument(
|
||||
"--max_train_steps",
|
||||
type=int,
|
||||
default=None,
|
||||
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--checkpointing_steps",
|
||||
type=int,
|
||||
default=500,
|
||||
help=(
|
||||
"Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
|
||||
" checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
|
||||
" training using `--resume_from_checkpoint`."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--resume_from_checkpoint",
|
||||
type=str,
|
||||
default=None,
|
||||
help=(
|
||||
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
|
||||
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--gradient_accumulation_steps",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Number of updates steps to accumulate before performing a backward/update pass.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--gradient_checkpointing",
|
||||
action="store_true",
|
||||
help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--learning_rate",
|
||||
type=float,
|
||||
default=5e-6,
|
||||
help="Initial learning rate (after the potential warmup period) to use.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--scale_lr",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--lr_scheduler",
|
||||
type=str,
|
||||
default="constant",
|
||||
help=(
|
||||
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
|
||||
' "constant", "constant_with_warmup"]'
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--lr_num_cycles",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
|
||||
)
|
||||
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
|
||||
parser.add_argument(
|
||||
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
|
||||
)
|
||||
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
|
||||
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
|
||||
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
|
||||
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
|
||||
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
|
||||
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
|
||||
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
|
||||
parser.add_argument(
|
||||
"--hub_model_id",
|
||||
type=str,
|
||||
default=None,
|
||||
help="The name of the repository to keep in sync with the local `output_dir`.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--logging_dir",
|
||||
type=str,
|
||||
default="logs",
|
||||
help=(
|
||||
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
|
||||
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--allow_tf32",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
|
||||
" https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--project_name",
|
||||
type=str,
|
||||
default=None,
|
||||
help=("The project name for log tracking"),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--run_name",
|
||||
type=str,
|
||||
default=None,
|
||||
help=("The run name for log tracking"),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--report_to",
|
||||
type=str,
|
||||
default="wandb",
|
||||
help=(
|
||||
'The integration to report the results and logs to. Supported platforms are `"wandb"`'
|
||||
' (default), `"tensorboard"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--wandb_key",
|
||||
type=str,
|
||||
default=None,
|
||||
help=("If report to option is set to wandb, api-key for wandb used for login to wandb "),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--wandb_project_name",
|
||||
type=str,
|
||||
default=None,
|
||||
help=("If report to option is set to wandb, project name in wandb for log tracking "),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--wandb_run_name",
|
||||
type=str,
|
||||
default=None,
|
||||
help=("If report to option is set to wandb, project name in wandb for log tracking "),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--mixed_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp16", "bf16"],
|
||||
help=(
|
||||
"Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
|
||||
" 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
|
||||
" flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--prior_generation_precision",
|
||||
type=str,
|
||||
default=None,
|
||||
choices=["no", "fp32", "fp16", "bf16"],
|
||||
help=(
|
||||
"Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
|
||||
" 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
|
||||
),
|
||||
)
|
||||
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
|
||||
parser.add_argument(
|
||||
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
|
||||
)
|
||||
|
||||
if input_args is not None:
|
||||
args = parser.parse_args(input_args)
|
||||
else:
|
||||
args = parser.parse_args()
|
||||
|
||||
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
|
||||
if env_local_rank != -1 and env_local_rank != args.local_rank:
|
||||
args.local_rank = env_local_rank
|
||||
|
||||
# Sanity checks
|
||||
# if args.dataset_name is None and args.train_data_dir is None:
|
||||
# raise ValueError("Need either a dataset name or a training folder.")
|
||||
|
||||
if args.with_prior_preservation:
|
||||
if args.class_data_dir is None:
|
||||
raise ValueError("You must specify a data directory for class images.")
|
||||
if args.class_prompt is None:
|
||||
raise ValueError("You must specify prompt for class images.")
|
||||
else:
|
||||
# logger is not available yet
|
||||
if args.class_data_dir is not None:
|
||||
warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
|
||||
if args.class_prompt is not None:
|
||||
warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
|
||||
|
||||
return args
|
128
examples/hra_dreambooth/utils/dataset.py
Normal file
128
examples/hra_dreambooth/utils/dataset.py
Normal file
@ -0,0 +1,128 @@
|
||||
# adapted from [peft's boft_dreambooth](https://github.com/huggingface/peft/tree/main/examples/boft_dreambooth)
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
import torch
|
||||
from PIL import Image
|
||||
from torch.utils.data import Dataset
|
||||
from torchvision import transforms
|
||||
|
||||
|
||||
class DreamBoothDataset(Dataset):
    """
    A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
    It pre-processes the images and tokenizes the prompts.
    """

    def __init__(
        self,
        instance_data_root,
        instance_prompt,
        tokenizer,
        class_data_root=None,
        class_prompt=None,
        size=512,
        center_crop=False,
    ):
        """
        Args:
            instance_data_root: Directory containing the instance (subject) images. Must exist.
            instance_prompt: Prompt describing the instance; tokenized for every item.
            tokenizer: Tokenizer used to encode the prompts.
            class_data_root: Optional directory with class (prior-preservation) images; created if missing.
            class_prompt: Prompt describing the class; used only when ``class_data_root`` is set.
            size: Target square resolution of the transformed images.
            center_crop: If True use a deterministic center crop, otherwise a random crop.
        """
        self.size = size
        self.center_crop = center_crop
        self.tokenizer = tokenizer

        self.instance_data_root = Path(instance_data_root)
        if not self.instance_data_root.exists():
            raise ValueError("Instance images root doesn't exist.")

        self.instance_images_path = list(self.instance_data_root.iterdir())
        self.num_instance_images = len(self.instance_images_path)
        self.instance_prompt = instance_prompt
        self._length = self.num_instance_images

        if class_data_root is not None:
            self.class_data_root = Path(class_data_root)
            self.class_data_root.mkdir(parents=True, exist_ok=True)
            self.class_images_path = list(self.class_data_root.iterdir())
            self.num_class_images = len(self.class_images_path)
            # One epoch covers the longer list; the shorter one repeats via modulo in __getitem__.
            self._length = max(self.num_class_images, self.num_instance_images)
            self.class_prompt = class_prompt
        else:
            self.class_data_root = None

        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),  # map pixel values from [0, 1] to [-1, 1]
            ]
        )

    def __len__(self):
        return self._length

    def _tokenize_prompt(self, prompt):
        # Encode a prompt to fixed-length input ids, padded to the tokenizer's model_max_length.
        return self.tokenizer(
            prompt,
            truncation=True,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        ).input_ids

    def __getitem__(self, index):
        example = {}
        # Modulo indexing lets the shorter image list cycle when lengths differ.
        instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
        if instance_image.mode != "RGB":
            instance_image = instance_image.convert("RGB")
        example["instance_images"] = self.image_transforms(instance_image)
        example["instance_prompt_ids"] = self._tokenize_prompt(self.instance_prompt)

        if self.class_data_root:
            class_image = Image.open(self.class_images_path[index % self.num_class_images])
            if class_image.mode != "RGB":
                class_image = class_image.convert("RGB")
            example["class_images"] = self.image_transforms(class_image)
            example["class_prompt_ids"] = self._tokenize_prompt(self.class_prompt)

        return example
|
||||
|
||||
|
||||
def collate_fn(examples, with_prior_preservation=False):
    """Stack DreamBooth examples into one training batch.

    When prior preservation is enabled, the class tensors are appended after
    the instance tensors so both losses are computed in a single forward pass.
    """
    input_ids = [ex["instance_prompt_ids"] for ex in examples]
    pixel_values = [ex["instance_images"] for ex in examples]

    if with_prior_preservation:
        input_ids.extend(ex["class_prompt_ids"] for ex in examples)
        pixel_values.extend(ex["class_images"] for ex in examples)

    stacked_pixels = torch.stack(pixel_values)
    stacked_pixels = stacked_pixels.to(memory_format=torch.contiguous_format).float()

    return {
        "input_ids": torch.cat(input_ids, dim=0),
        "pixel_values": stacked_pixels,
    }
|
||||
|
||||
|
||||
class PromptDataset(Dataset):
    """A simple dataset to prepare the prompts to generate class images on multiple GPUs."""

    def __init__(self, prompt, num_samples):
        self.prompt = prompt
        self.num_samples = num_samples

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        # Every item carries the same prompt; the index lets each worker tell its outputs apart.
        return {"prompt": self.prompt, "index": index}
|
60
examples/hra_dreambooth/utils/tracemalloc.py
Normal file
60
examples/hra_dreambooth/utils/tracemalloc.py
Normal file
@ -0,0 +1,60 @@
|
||||
# adapted from [peft's boft_dreambooth](https://github.com/huggingface/peft/tree/main/examples/boft_dreambooth)
|
||||
|
||||
import gc
|
||||
import threading
|
||||
|
||||
import psutil
|
||||
import torch
|
||||
|
||||
|
||||
# Converting Bytes to Megabytes
|
||||
def b2mb(x):
    """Convert a byte count to whole megabytes, truncating toward zero."""
    return int(x / (1 << 20))
|
||||
|
||||
|
||||
# This context manager is used to track the peak memory usage of the process
|
||||
class TorchTracemalloc:
    """Context manager that tracks peak GPU and CPU memory usage of the process.

    After the ``with`` block exits, the following attributes are available (MB):
      - ``used`` / ``peaked``: GPU memory delta and peak delta over the block.
      - ``cpu_used`` / ``cpu_peaked``: process RSS delta and peak delta over the block.

    NOTE(review): the torch.cuda calls assume a CUDA device is present — confirm
    callers only use this on GPU runs.
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        # NOTE(review): reset_max_memory_allocated is deprecated in favor of
        # torch.cuda.reset_peak_memory_stats — confirm minimum torch version.
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        self.process = psutil.Process()

        self.cpu_begin = self.cpu_mem_used()
        self.peak_monitoring = True
        # psutil has no peak-RSS gauge, so poll from a daemon thread instead.
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()
        return self

    def cpu_mem_used(self):
        """get resident set size memory for the current process"""
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        # Busy-polls the RSS; runs until __exit__ clears ``peak_monitoring``.
        self.cpu_peak = -1

        while True:
            self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            # time.sleep(0.001) # 1msec

            if not self.peak_monitoring:
                break

    def __exit__(self, *exc):
        # Stop the polling thread, then take final readings.
        self.peak_monitoring = False

        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)

        self.cpu_end = self.cpu_mem_used()
        self.cpu_used = b2mb(self.cpu_end - self.cpu_begin)
        self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
|
@ -1008,7 +1008,7 @@
|
||||
"args = TrainingArguments(\n",
|
||||
" f\"{model_name}-finetuned-lora-food101\",\n",
|
||||
" remove_unused_columns=False,\n",
|
||||
" evaluation_strategy=\"epoch\",\n",
|
||||
" eval_strategy=\"epoch\",\n",
|
||||
" save_strategy=\"epoch\",\n",
|
||||
" learning_rate=5e-3,\n",
|
||||
" per_device_train_batch_size=batch_size,\n",
|
||||
|
@ -819,7 +819,7 @@
|
||||
"\n",
|
||||
"training_args = TrainingArguments(\n",
|
||||
" \"temp\",\n",
|
||||
" evaluation_strategy=\"epoch\",\n",
|
||||
" eval_strategy=\"epoch\",\n",
|
||||
" learning_rate=1e-3,\n",
|
||||
" gradient_accumulation_steps=1,\n",
|
||||
" auto_find_batch_size=True,\n",
|
||||
|
@ -1246,7 +1246,7 @@
|
||||
" learning_rate=1e-3,\n",
|
||||
" warmup_steps=50,\n",
|
||||
" num_train_epochs=3,\n",
|
||||
" evaluation_strategy=\"epoch\",\n",
|
||||
" eval_strategy=\"epoch\",\n",
|
||||
" fp16=True,\n",
|
||||
" per_device_eval_batch_size=8,\n",
|
||||
" generation_max_length=128,\n",
|
||||
|
84
examples/olora_finetuning/README.md
Normal file
84
examples/olora_finetuning/README.md
Normal file
@ -0,0 +1,84 @@
|
||||
# OLoRA: Orthonormal Low Rank Adaptation of Large Language Models
|
||||
|
||||
## Introduction
|
||||
[OLoRA](https://arxiv.org/abs/2406.01775) is a novel approach that leverages orthonormal low rank adaptation through QR decomposition. Unlike the default LoRA implementation, OLoRA decomposes original weights into their $\mathbf{Q}$ and $\mathbf{R}$ parts, and then uses the first `rank` rows of $\mathbf{R}$ and the first `rank` columns of $\mathbf{Q}$ to initialize $\mathbf{A}$ and $\mathbf{B}$, respectively. This results in significantly faster convergence, more stable training, and superior performance.
|
||||
|
||||
## Quick start
|
||||
```python
|
||||
import torch
|
||||
from peft import LoraConfig, get_peft_model
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
from trl import SFTTrainer
|
||||
from datasets import load_dataset
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.bfloat16, device_map="auto")
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
|
||||
dataset = load_dataset("imdb", split="train[:1%]")
|
||||
lora_config = LoraConfig(
|
||||
init_lora_weights="olora"
|
||||
)
|
||||
peft_model = get_peft_model(model, lora_config)
|
||||
trainer = SFTTrainer(
|
||||
model=peft_model,
|
||||
train_dataset=dataset,
|
||||
dataset_text_field="text",
|
||||
max_seq_length=512,
|
||||
tokenizer=tokenizer,
|
||||
)
|
||||
trainer.train()
|
||||
peft_model.save_pretrained("olora-opt-350m")
|
||||
```
|
||||
|
||||
There is no additional change needed to your standard LoRA procedure, except for specifying `init_lora_weights = "olora"` option in your lora configuration.
|
||||
|
||||
Additionally, you can refer to the OLoRA fine-tuning script.
|
||||
Run the script simply by running:
|
||||
```bash
|
||||
python3 examples/olora_finetuning/olora_finetuning.py --base_model facebook/opt-350m
|
||||
```
|
||||
OLoRA also supports quantization. To use 4-bit quantization try:
|
||||
```bash
|
||||
python3 examples/olora_finetuning/olora_finetuning.py --base_model facebook/opt-350m --quantize
|
||||
```
|
||||
|
||||
|
||||
## Use the model
|
||||
You can load and use the model as any other 🤗 PEFT model
|
||||
```python
|
||||
from peft import PeftModel
|
||||
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
|
||||
olora_model = PeftModel.from_pretrained(model, "olora-opt-350m")
|
||||
```
|
||||
|
||||
## OLoRA and LoRA
|
||||
OLoRA differs from LoRA in that it mutates the original weights. To utilize multiple adapters simultaneously, you can leverage the `path_initial_model_for_weight_conversion` option. Below is a simple template illustrating how to convert OLoRA to conventional LoRA:
|
||||
```python
|
||||
base_model = AutoModel.from_pretrained("facebook/opt-350m")
|
||||
olora_config = LoraConfig(
|
||||
...
|
||||
init_lora_weights = "olora" # Initialize the model with OLoRA
|
||||
)
|
||||
olora_model = get_peft_model(base_model, olora_config)
|
||||
init_path = "path/to/untrained-olora-model"  # directory where the untrained OLoRA model will be saved
|
||||
olora_model.save_pretrained(init_path) # Save the model *before* performing any training
|
||||
|
||||
# Train the model
|
||||
train(olora_model) # Your training loop
|
||||
|
||||
# Save the model after training
|
||||
olora_model.save_pretrained(output_dir, path_initial_model_for_weight_conversion=init_path)
|
||||
```
|
||||
After completing training, you can save and convert your OLoRA model to a conventional LoRA model by setting `path_initial_model_for_weight_conversion` to `init_path`, that is the path of your untrained OLoRA model. This conversion enables you to use multiple adapters with your LoRA model. Note that this conversion is not supported if `rslora` is used in combination with `rank_pattern` or `alpha_pattern`.
|
||||
|
||||
## Citation
|
||||
```
|
||||
@misc{büyükakyüz2024olora,
|
||||
title={OLoRA: Orthonormal Low-Rank Adaptation of Large Language Models},
|
||||
author={Kerim Büyükakyüz},
|
||||
year={2024},
|
||||
eprint={2406.01775},
|
||||
archivePrefix={arXiv},
|
||||
primaryClass={cs.CL}
|
||||
}
|
||||
```
|
184
examples/olora_finetuning/olora_finetuning.py
Normal file
184
examples/olora_finetuning/olora_finetuning.py
Normal file
@ -0,0 +1,184 @@
|
||||
# Copyright 2024-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from typing import List
|
||||
|
||||
import torch
|
||||
import transformers
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
||||
|
||||
from peft import (
|
||||
LoraConfig,
|
||||
get_peft_model,
|
||||
)
|
||||
|
||||
|
||||
def train(
    base_model: str = "path/to/model",
    data_path: str = "yahma/alpaca-cleaned",
    output_dir: str = "olora",
    batch_size: int = 16,
    num_epochs: int = 1,
    learning_rate: float = 3e-4,
    cutoff_len: int = 256,
    val_set_size: int = 16,
    quantize: bool = False,
    eval_step: int = 100,
    save_step: int = 100,
    device_map: str = "auto",
    lora_r: int = 32,
    lora_alpha: int = 16,
    lora_dropout: float = 0.05,
    lora_target_modules: List[str] = None,
    init_lora_weights: str = "olora",
):
    """Fine-tune a causal language model with an OLoRA-initialized LoRA adapter.

    Loads ``base_model`` (optionally in 4-bit NF4 quantization), wraps it with a
    LoRA adapter initialized via ``init_lora_weights``, tokenizes an Alpaca-style
    instruction dataset, trains with the HF ``Trainer``, and saves the adapter
    to ``output_dir``.
    """
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        device_map=device_map,
        # 4-bit NF4 quantization only when --quantize is passed; otherwise load normally.
        quantization_config=BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )
        if quantize
        else None,
        # NOTE(review): fp16 weights here vs bf16 quant compute dtype above — confirm intended.
        torch_dtype=torch.float16,
    )

    tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)

    def tokenize(prompt, add_eos_token=True):
        # Tokenize a single prompt; padding is deferred to the data collator.
        result = tokenizer(
            prompt,
            truncation=True,
            max_length=cutoff_len,
            padding=False,
            return_tensors=None,
        )
        # Append EOS only when there is room and truncation didn't already end the sequence.
        if (
            result["input_ids"][-1] != tokenizer.eos_token_id
            and len(result["input_ids"]) < cutoff_len
            and add_eos_token
        ):
            result["input_ids"].append(tokenizer.eos_token_id)
            result["attention_mask"].append(1)

        # Causal LM: labels mirror the inputs.
        result["labels"] = result["input_ids"].copy()

        return result

    def generate_and_tokenize_prompt(example):
        # Render the Alpaca-style template, then tokenize it.
        full_prompt = generate_prompt(example)
        tokenized_full_prompt = tokenize(full_prompt)
        return tokenized_full_prompt

    config = LoraConfig(
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=lora_target_modules,
        lora_dropout=lora_dropout,
        bias="none",
        task_type="CAUSAL_LM",
        init_lora_weights=init_lora_weights,
    )
    model = get_peft_model(model, config)

    data = load_dataset(data_path)

    # Fixed seed so the train/validation split is reproducible across runs.
    train_val = data["train"].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
    train_data = train_val["train"].shuffle().map(generate_and_tokenize_prompt)
    val_data = train_val["test"].shuffle().map(generate_and_tokenize_prompt)

    trainer = transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=batch_size,
            warmup_steps=100,
            num_train_epochs=num_epochs,
            learning_rate=learning_rate,
            fp16=True,
            logging_steps=100,
            optim="adamw_torch",
            # NOTE(review): `evaluation_strategy` was renamed to `eval_strategy`
            # in newer transformers releases — confirm the pinned version.
            evaluation_strategy="steps",
            save_strategy="steps",
            eval_steps=eval_step,
            save_steps=save_step,
            output_dir=output_dir,
            save_total_limit=3,
            load_best_model_at_end=True,
        ),
        # Seq2Seq collator pads inputs AND labels to a multiple of 8 for tensor-core efficiency.
        data_collator=transformers.DataCollatorForSeq2Seq(
            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
        ),
    )
    trainer.train()
    # Saves only the adapter weights/config, not the full base model.
    model.save_pretrained(output_dir)
|
||||
|
||||
|
||||
def generate_prompt(example):
    """Render an Alpaca-style example (``instruction`` + ``output``) into one prompt string."""
    template = (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n"
        "### Instruction:\n"
        "{instruction}\n"
        "### Response:\n"
        "{output}"
    )
    return template.format(instruction=example["instruction"], output=example["output"])
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import argparse

    # CLI mirrors train()'s keyword arguments one-to-one.
    cli = argparse.ArgumentParser()
    cli.add_argument("--base_model", type=str, default="path/to/model")
    cli.add_argument("--data_path", type=str, default="yahma/alpaca-cleaned")
    cli.add_argument("--output_dir", type=str, default="olora")
    cli.add_argument("--batch_size", type=int, default=16)
    cli.add_argument("--num_epochs", type=int, default=1)
    cli.add_argument("--learning_rate", type=float, default=3e-4)
    cli.add_argument("--cutoff_len", type=int, default=256)
    cli.add_argument("--val_set_size", type=int, default=16)
    cli.add_argument("--quantize", action="store_true")
    cli.add_argument("--eval_step", type=int, default=100)
    cli.add_argument("--save_step", type=int, default=100)
    cli.add_argument("--device_map", type=str, default="auto")
    cli.add_argument("--lora_r", type=int, default=32)
    cli.add_argument("--lora_alpha", type=int, default=16)
    cli.add_argument("--lora_dropout", type=float, default=0.05)
    cli.add_argument("--lora_target_modules", type=str, default=None)
    cli.add_argument("--init_lora_weights", type=str, default="olora")

    parsed = cli.parse_args()

    # Argument names match train()'s signature exactly, so forward them directly.
    train(**vars(parsed))
|
131
examples/pissa_finetuning/README.md
Normal file
131
examples/pissa_finetuning/README.md
Normal file
@ -0,0 +1,131 @@
|
||||
# PiSSA: Principal Singular values and Singular vectors Adaptation
|
||||
## Introduction ([Paper](https://arxiv.org/abs/2404.02948), [code](https://github.com/GraphPKU/PiSSA))
|
||||
PiSSA represents a matrix $W\in\mathbb{R}^{m\times n}$ within the model by the product of two trainable matrices $A \in \mathbb{R}^{m\times r}$ and $B \in \mathbb{R}^{r\times n}$, where $r \ll \min(m, n)$, plus a residual matrix $W^{res}\in\mathbb{R}^{m\times n}$ for error correction. Singular value decomposition (SVD) is employed to factorize $W$, and the principal singular values and vectors of $W$ are utilized to initialize $A$ and $B$. The residual singular values and vectors initialize the residual matrix $W^{res}$, which is kept frozen during fine-tuning. This straightforward modification allows PiSSA to converge more rapidly than LoRA and ultimately attain superior performance. Moreover, PiSSA reduces the quantization error compared to QLoRA, leading to further enhancements.
|
||||
|
||||
## Quick Start
|
||||
```python
|
||||
import torch
|
||||
from peft import LoraConfig, get_peft_model
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
from trl import SFTTrainer
|
||||
from datasets import load_dataset
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", torch_dtype=torch.bfloat16, device_map="auto")
|
||||
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
|
||||
tokenizer.pad_token_id = tokenizer.eos_token_id
|
||||
lora_config = LoraConfig(
|
||||
# init_lora_weights="pissa", # Configure the initialization method to "pissa", which may take several minutes to execute SVD on the pre-trained model.
|
||||
init_lora_weights="pissa_niter_4", # Initialize the PiSSA with fast SVD, which completes in just a few seconds.
|
||||
)
|
||||
peft_model = get_peft_model(model, lora_config)
|
||||
|
||||
peft_model.print_trainable_parameters()
|
||||
|
||||
dataset = load_dataset("imdb", split="train[:1%]")
|
||||
|
||||
trainer = SFTTrainer(
|
||||
model=peft_model,
|
||||
train_dataset=dataset,
|
||||
dataset_text_field="text",
|
||||
max_seq_length=128,
|
||||
tokenizer=tokenizer,
|
||||
)
|
||||
trainer.train()
|
||||
peft_model.save_pretrained("pissa-llama-2-7b")
|
||||
```
|
||||
When utilizing fast SVD, reducing the rank and the number of iterations decreases the time required. However, this approach leads to higher errors in the computed matrices $A$ and $B$. To preserve the model's initial capabilities, we calculate the residual matrix by $W^{res} = W - BA$. Even with potential errors in $A$ and $B$, the sum of $W^{res}$ and $BA$ accurately equals $W$.
|
||||
|
||||
|
||||
To utilize the fine-tuned PiSSA modules, simply run the following command:
|
||||
```python
|
||||
import torch
|
||||
from peft import PeftModel
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"meta-llama/Llama-2-7b-hf", torch_dtype=torch.bfloat16, device_map="auto"
|
||||
)
|
||||
# Performs SVD again to initialize the residual model and loads the state_dict of the fine-tuned PiSSA modules.
|
||||
peft_model = PeftModel.from_pretrained(model, "pissa-llama-2-7b")
|
||||
```
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Access the preprocessed models
|
||||
We recommend downloading decomposed models directly from the [Hugging Face Collections](https://huggingface.co/collections/fxmeng/pissa-661ce700721235e542a5d7a8) instead of performing SVD every time.
|
||||
If the existing models do not meet your needs, apply PiSSA initialization to a pre-trained model and store the decomposed model locally:
|
||||
```bash
|
||||
python preprocess.py \
|
||||
--base_model_name_or_path meta-llama/Llama-2-7b-hf \
|
||||
--init_lora_weights pissa \
|
||||
--output_dir pissa-llama-2-7b-r32-alpha-32 \
|
||||
--lora_r 32 \
|
||||
--lora_alpha 32 \
|
||||
--lora_dropout 0 \
|
||||
--bits bf16
|
||||
```
|
||||
|
||||
### Convert PiSSA to LoRA
|
||||
The main advantage of PiSSA lies in the training phase. For a trained PiSSA adapter, we recommend converting it equivalently to a LoRA adapter for ease of use and sharing.
|
||||
```python
|
||||
# The fine-tuned matrices $A$ and $B$ in PiSSA adapter is saved and should be combined with the residual model.
|
||||
peft_model.save_pretrained(output_dir)
|
||||
# Given the matrices $A_0$ and $B_0$, initialized by PiSSA and untrained, and the trained matrices $A$ and $B$,
|
||||
# we can convert these to LoRA by setting $\Delta W = A \times B - A_0 \times B_0 = [A \mid A_0] \times [B \mid -B_0]^T = A'B'$.
|
||||
peft_model.save_pretrained(output_dir, convert_pissa_to_lora="pissa_init")
|
||||
|
||||
```
|
||||
This conversion enables the loading of LoRA on top of a standard base model:
|
||||
|
||||
```python
|
||||
import torch
|
||||
from peft import PeftModel
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"meta-llama/Llama-2-7b-hf", torch_dtype=torch.bfloat16, device_map="auto"
|
||||
)
|
||||
# No SVD is performed during this step, and the base model remains unaltered.
|
||||
peft_model = PeftModel.from_pretrained(model, "pissa-llama-2-7b-lora")
|
||||
```
|
||||
Utilizing the converted LoRA does not require modifying the parameters of the base model. When multiple converted LoRAs are needed simultaneously, each adapter operates independently without interference, allowing for the adapters to be freely deleted or added.
|
||||
|
||||
Note that this conversion is not supported if `rslora` is used in combination with `rank_pattern` or `alpha_pattern`.
|
||||
|
||||
### Fine-tune in 4-bit or 8-bit
|
||||
If quantization fine-tuning is desired, it is necessary to first decompose the original model at full precision and then reload the residual model in either 4-bit or 8-bit configurations.
|
||||
```shell
|
||||
python pissa_finetuning.py \
|
||||
--residual_model_name_or_path fxmeng/pissa-llama-2-7b-r16-alpha-16 \
|
||||
--output_dir output/pissa-llama-2-7b-r16-alpha-16-metamath-10k \
|
||||
--bits nf4 \
|
||||
--data_path meta-math/MetaMathQA \
|
||||
--dataset_split train[:100000] \
|
||||
--dataset_field query response \
|
||||
--bf16 True \
|
||||
--num_train_epochs 1 \
|
||||
--per_device_train_batch_size 32 \
|
||||
--gradient_accumulation_steps 4 \
|
||||
--save_strategy "steps" \
|
||||
--save_steps 1000 \
|
||||
--save_total_limit 1 \
|
||||
--logging_steps 1 \
|
||||
--learning_rate 2e-5 \
|
||||
--weight_decay 0. \
|
||||
--warmup_ratio 0.03 \
|
||||
--tf32 True \
|
||||
--report_to none \
|
||||
--convert_pissa_to_lora
|
||||
```
|
||||
|
||||
This approach ensures the preservation of high-frequency, out-of-distribution parameters in the low-rank PiSSA modules, resulting in reduced quantization errors during the quantization of the residual model.
|
||||
|
||||
## Citation
|
||||
```
|
||||
@article{meng2024pissa,
|
||||
title={PiSSA: Principal Singular Values and Singular Vectors Adaptation of Large Language Models},
|
||||
author={Meng, Fanxu and Wang, Zhaohui and Zhang, Muhan},
|
||||
journal={arXiv preprint arXiv:2404.02948},
|
||||
year={2024}
|
||||
}
|
||||
```
|
156
examples/pissa_finetuning/pissa_finetuning.py
Normal file
156
examples/pissa_finetuning/pissa_finetuning.py
Normal file
@ -0,0 +1,156 @@
|
||||
# Copyright 2023-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
from dataclasses import dataclass, field
|
||||
from typing import List, Optional
|
||||
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments
|
||||
from trl import SFTTrainer
|
||||
|
||||
from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training
|
||||
|
||||
|
||||
@dataclass
class TrainingArguments(TrainingArguments):
    """PiSSA fine-tuning arguments.

    NOTE: this intentionally shadows the imported ``transformers.TrainingArguments`` so that
    ``HfArgumentParser(TrainingArguments)`` exposes both the standard trainer flags and the
    PiSSA-specific options below under a single dataclass.
    """

    # model configs
    base_model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "The name or path of the fp32/16 base model."}
    )
    residual_model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The name or path of the fp32/16 residual model. (`['fxmeng/pissa-llama-2-7b-r16-alpha-16']`)"
        },
    )
    # "nf4"/"fp4"/"int8" trigger the bitsandbytes quantized load path; the float
    # options select the torch dtype for a full-precision load.
    bits: str = field(default="fp32", metadata={"help": "(`['fp4', 'nf4', 'int8', 'bf16', 'fp16', 'fp32']`)"})
    init_lora_weights: str = field(default="pissa", metadata={"help": "(`['gaussian', 'pissa', 'pissa_niter_4']`)"})
    lora_r: int = field(default=16, metadata={"help": "Rank of the PiSSA/LoRA adapters."})
    lora_alpha: int = field(default=16, metadata={"help": "Scaling factor of the adapters."})
    # Dropout is a probability; default as a float for consistency with the annotation.
    lora_dropout: float = field(default=0.0, metadata={"help": "Dropout probability applied to the adapter layers."})
    convert_pissa_to_lora: bool = field(
        default=False, metadata={"help": "After training, convert the PiSSA adapter into plain LoRA format."}
    )
    merge_and_save: bool = field(
        default=False, metadata={"help": "After training, merge the adapter into the base weights and save."}
    )
    # dataset configs
    data_path: str = field(default="imdb", metadata={"help": "Path to the training data."})
    dataset_split: str = field(default="train[:1%]", metadata={"help": "(`['train', 'test', 'eval']`):"})
    dataset_field: List[str] = field(default=None, metadata={"help": "Fields of dataset input and output."})
    max_seq_length: int = field(
        default=512,
        metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
    )
|
||||
|
||||
|
||||
parser = HfArgumentParser(TrainingArguments)
script_args = parser.parse_args_into_dataclasses()[0]
print(script_args)


def _wrap_residual_with_pissa(res_model, model_name_or_path):
    """Attach the pre-computed PiSSA adapter stored under `pissa_init` and load the matching tokenizer."""
    print("Wrapping the residual model with PiSSA.")
    peft_model = PeftModel.from_pretrained(res_model, model_name_or_path, subfolder="pissa_init", is_trainable=True)
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    return peft_model, tokenizer


def _float_dtype(bits):
    """Map the `bits` flag to a torch dtype for the non-quantized load paths."""
    if bits == "fp16":
        return torch.float16
    if bits == "bf16":
        return torch.bfloat16
    return torch.float32


print(f"Load pre-processed residual model in {script_args.bits} bits.")
if script_args.bits in ("nf4", "fp4", "int8"):
    # Quantized path: the residual model is loaded through bitsandbytes and must be
    # prepared for k-bit training before the adapter is attached.
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=script_args.bits in ("nf4", "fp4"),
        load_in_8bit=script_args.bits == "int8",
        # bnb_4bit_quant_type only accepts "fp4"/"nf4" and is ignored on the int8 path,
        # so fall back to "nf4" there instead of passing an invalid "int8" value.
        bnb_4bit_quant_type=script_args.bits if script_args.bits in ("nf4", "fp4") else "nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    res_model = AutoModelForCausalLM.from_pretrained(
        script_args.residual_model_name_or_path, quantization_config=quantization_config, low_cpu_mem_usage=True
    )
    res_model = prepare_model_for_kbit_training(res_model)
    peft_model, tokenizer = _wrap_residual_with_pissa(res_model, script_args.residual_model_name_or_path)

elif script_args.residual_model_name_or_path is not None:
    # Full/half-precision path using a pre-decomposed residual model.
    res_model = AutoModelForCausalLM.from_pretrained(
        script_args.residual_model_name_or_path,
        torch_dtype=_float_dtype(script_args.bits),
        device_map="auto",
    )
    peft_model, tokenizer = _wrap_residual_with_pissa(res_model, script_args.residual_model_name_or_path)

elif script_args.base_model_name_or_path is not None:
    # No pre-processed model available: initialize PiSSA from scratch on the base model.
    print(
        f"No available pre-processed model, manually initialize a PiSSA using {script_args.base_model_name_or_path}."
    )
    model = AutoModelForCausalLM.from_pretrained(
        script_args.base_model_name_or_path,
        torch_dtype=_float_dtype(script_args.bits),
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path)
    tokenizer.pad_token_id = tokenizer.eos_token_id
    lora_config = LoraConfig(
        r=script_args.lora_r,
        lora_alpha=script_args.lora_alpha,
        init_lora_weights=script_args.init_lora_weights,
        lora_dropout=script_args.lora_dropout,
        target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"],
        bias="none",
        task_type="CAUSAL_LM",
    )
    peft_model = get_peft_model(model, lora_config)

else:
    # The original code fell through here and crashed later with a NameError; fail fast instead.
    raise ValueError("Either --residual_model_name_or_path or --base_model_name_or_path must be provided.")

print(peft_model)
peft_model.print_trainable_parameters()

print(f"Training PiSSA with trl on the {script_args.data_path}[{script_args.dataset_split}] dataset.")
dataset = load_dataset(script_args.data_path, split=script_args.dataset_split)
dataset = dataset.map(
    lambda example: {
        "text": f"### USER: {example[script_args.dataset_field[0]]}\n### ASSISTANT: {example[script_args.dataset_field[1]]}"
    }
)

trainer = SFTTrainer(
    model=peft_model,
    args=script_args,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=script_args.max_seq_length,
    tokenizer=tokenizer,
)
trainer.train()
trainer.save_state()

############################## Upon training completion, convert and save PiSSA in LoRA format ##############################
if script_args.convert_pissa_to_lora:
    # NOTE(review): conversion reads the initial PiSSA weights from
    # `<residual_model>/pissa_init`, so this flag presumably requires a
    # pre-processed residual model — confirm before combining it with
    # --base_model_name_or_path.
    peft_model.save_pretrained(
        os.path.join(script_args.output_dir, "pissa_lora"),
        convert_pissa_to_lora=os.path.join(script_args.residual_model_name_or_path, "pissa_init"),
    )
else:
    peft_model.save_pretrained(
        os.path.join(script_args.output_dir, "pissa_ft"),
    )

if script_args.merge_and_save:
    # Merge the adapter into the base weights and save a standalone checkpoint.
    model = peft_model.merge_and_unload()
    model.save_pretrained(os.path.join(script_args.output_dir, "pissa_merged"))
    tokenizer.save_pretrained(os.path.join(script_args.output_dir, "pissa_merged"))
|
70
examples/pissa_finetuning/preprocess.py
Normal file
70
examples/pissa_finetuning/preprocess.py
Normal file
@ -0,0 +1,70 @@
|
||||
# Copyright 2023-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
from peft import LoraConfig, get_peft_model
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(description="Merge Adapter to Base Model")
parser.add_argument(
    "--base_model_name_or_path",
    type=str,
    # The original code passed `description=` here, which is not a valid
    # `add_argument` keyword and raised a TypeError before parsing anything;
    # the description text belongs on the ArgumentParser itself.
    help="The name or path of the fp32/16 base model.",
)
parser.add_argument("--output_dir", type=str, help="The directory to save the PiSSA model.")
parser.add_argument("--bits", type=str, default="bf16", choices=["bf16", "fp16", "fp32"])
parser.add_argument(
    "--init_lora_weights", type=str, default="pissa", help="(`['pissa', 'pissa_niter_[number of iters]']`)"
)
parser.add_argument("--lora_r", type=int, default=128)
parser.add_argument("--lora_alpha", type=int, default=128)
# Dropout is a probability; `type=int` rejected fractional values such as 0.1.
parser.add_argument("--lora_dropout", type=float, default=0.0)
script_args = parser.parse_args()
print(script_args)

# Load the base model at the requested (non-quantized) precision.
model = AutoModelForCausalLM.from_pretrained(
    script_args.base_model_name_or_path,
    torch_dtype=(
        torch.float16
        if script_args.bits == "fp16"
        else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32)
    ),
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path)
tokenizer.pad_token_id = tokenizer.eos_token_id

# Wrapping with this config performs the PiSSA (SVD-based) initialization.
lora_config = LoraConfig(
    r=script_args.lora_r,
    lora_alpha=script_args.lora_alpha,
    init_lora_weights=script_args.init_lora_weights,
    lora_dropout=script_args.lora_dropout,
    target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)
peft_model = get_peft_model(model, lora_config)

# Save PiSSA modules. Flipping `init_lora_weights` to True in the stored config
# prevents the expensive SVD initialization from re-running when the saved
# adapter is later reloaded.
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained(os.path.join(script_args.output_dir, "pissa_init"))
# Save residual model (base weights with the principal components removed):
peft_model = peft_model.unload()
peft_model.save_pretrained(script_args.output_dir)
# Save the tokenizer:
tokenizer.save_pretrained(script_args.output_dir)
|
@ -973,7 +973,7 @@
|
||||
" per_device_eval_batch_size=batch_size,\n",
|
||||
" learning_rate=lr,\n",
|
||||
" num_train_epochs=num_epochs,\n",
|
||||
" evaluation_strategy=\"epoch\",\n",
|
||||
" eval_strategy=\"epoch\",\n",
|
||||
" logging_strategy=\"epoch\",\n",
|
||||
" save_strategy=\"no\",\n",
|
||||
" report_to=[],\n",
|
||||
|
@ -587,7 +587,7 @@
|
||||
" per_device_train_batch_size=4,\n",
|
||||
" per_device_eval_batch_size=2,\n",
|
||||
" save_total_limit=3,\n",
|
||||
" evaluation_strategy=\"epoch\",\n",
|
||||
" eval_strategy=\"epoch\",\n",
|
||||
" save_strategy=\"epoch\",\n",
|
||||
" logging_steps=5,\n",
|
||||
" remove_unused_columns=False,\n",
|
||||
|
556
examples/sequence_classification/FourierFT.ipynb
Normal file
556
examples/sequence_classification/FourierFT.ipynb
Normal file
@ -0,0 +1,556 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d36e1e93-ae93-4a4e-93c6-68fd868d2882",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Using FourierFT for sequence classification"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ddfc0610-55f6-4343-a950-125ccf0f45ac",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In this example, we fine-tune Roberta (base) on a sequence classification task using FourierFT."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45addd81-d4f3-4dfd-960d-3920d347f0a6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a9935ae2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/zgaoat/anaconda3/envs/pr2/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
||||
" from .autonotebook import tqdm as notebook_tqdm\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# To run this notebook, please run `pip install evaluate` to install additional dependencies not covered by PEFT.\n",
|
||||
"import torch\n",
|
||||
"from torch.optim import AdamW\n",
|
||||
"from torch.utils.data import DataLoader\n",
|
||||
"from peft import (\n",
|
||||
" get_peft_model,\n",
|
||||
" FourierFTConfig,\n",
|
||||
" PeftType,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"import evaluate\n",
|
||||
"from datasets import load_dataset\n",
|
||||
"from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed, AutoConfig\n",
|
||||
"from tqdm import tqdm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "62c959bf-7cc2-49e0-b97e-4c10ec3b9bf3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Parameters"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "e3b13308",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<torch._C.Generator at 0x78e2a49744b0>"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"batch_size = 32\n",
|
||||
"model_name_or_path = \"roberta-base\"\n",
|
||||
"task = \"mrpc\"\n",
|
||||
"peft_type = PeftType.FOURIERFT\n",
|
||||
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
|
||||
"num_epochs = 5 # for better results, increase this number\n",
|
||||
"n_frequency = 1000 # for better results, increase this number\n",
|
||||
"scaling = 150.0\n",
|
||||
"max_length = 512\n",
|
||||
"torch.manual_seed(0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "0526f571",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"peft_config = FourierFTConfig(\n",
|
||||
" task_type=\"SEQ_CLS\", \n",
|
||||
" n_frequency=n_frequency,\n",
|
||||
" target_modules=[\"query\", \"value\"],\n",
|
||||
" scaling = scaling,\n",
|
||||
")\n",
|
||||
"head_lr = 6e-3 # the learning rate for the classification head for NLU tasks\n",
|
||||
"fft_lr = 6e-2 # the learning rate for the parameters other than the classification head (q,v in this case)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c075c5d2-a457-4f37-a7f1-94fd0d277972",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "7bb52cb4-d1c3-4b04-8bf0-f39ca88af139",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if any(k in model_name_or_path for k in (\"gpt\", \"opt\", \"bloom\")):\n",
|
||||
" padding_side = \"left\"\n",
|
||||
"else:\n",
|
||||
" padding_side = \"right\"\n",
|
||||
"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)\n",
|
||||
"if getattr(tokenizer, \"pad_token_id\") is None:\n",
|
||||
" tokenizer.pad_token_id = tokenizer.eos_token_id"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "e69c5e1f-d27b-4264-a41e-fc9b99d025e6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datasets = load_dataset(\"glue\", task)\n",
|
||||
"metric = evaluate.load(\"glue\", task)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0209f778-c93b-40eb-a4e0-24c25db03980",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def tokenize_function(examples):\n",
|
||||
" # max_length=None => use the model max length (it's actually the default)\n",
|
||||
" outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=max_length)\n",
|
||||
" return outputs\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tokenized_datasets = datasets.map(\n",
|
||||
" tokenize_function,\n",
|
||||
" batched=True,\n",
|
||||
" remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n",
|
||||
"# transformers library\n",
|
||||
"tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "7453954e-982c-46f0-b09c-589776e6d6cb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def collate_fn(examples):\n",
|
||||
" return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Instantiate dataloaders.\n",
|
||||
"train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)\n",
|
||||
"eval_dataloader = DataLoader(\n",
|
||||
" tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f3b9b2e8-f415-4d0f-9fb4-436f1a3585ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Preparing the FourierFT model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "2ed5ac74",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at roberta-base and are newly initialized: ['classifier.dense.bias', 'classifier.dense.weight', 'classifier.out_proj.bias', 'classifier.out_proj.weight']\n",
|
||||
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"trainable params: 616,130 || all params: 125,263,300 || trainable%: 0.4919\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True, max_length=None)\n",
|
||||
"model = get_peft_model(model, peft_config)\n",
|
||||
"model.print_trainable_parameters()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0d2d0381",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"head_param = list(map(id, model.classifier.parameters()))\n",
|
||||
"\n",
|
||||
"others_param = filter(lambda p: id(p) not in head_param, model.parameters()) \n",
|
||||
"\n",
|
||||
"optimizer = AdamW([\n",
|
||||
" {\"params\": model.classifier.parameters(), \"lr\": head_lr},\n",
|
||||
" {\"params\": others_param, \"lr\": fft_lr}\n",
|
||||
"],weight_decay=0.)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Instantiate scheduler\n",
|
||||
"lr_scheduler = get_linear_schedule_with_warmup(\n",
|
||||
" optimizer=optimizer,\n",
|
||||
" num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs),\n",
|
||||
" num_training_steps=(len(train_dataloader) * num_epochs),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c0dd5aa8-977b-4ac0-8b96-884b17bcdd00",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Training"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "fa0e73be",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" 0%| | 0/115 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n",
|
||||
"100%|██████████| 115/115 [00:06<00:00, 19.03it/s]\n",
|
||||
"100%|██████████| 13/13 [00:00<00:00, 41.72it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 0: {'accuracy': 0.8161764705882353, 'f1': 0.8709122203098106}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:05<00:00, 20.61it/s]\n",
|
||||
"100%|██████████| 13/13 [00:00<00:00, 42.91it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 1: {'accuracy': 0.8480392156862745, 'f1': 0.8966666666666666}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:05<00:00, 20.63it/s]\n",
|
||||
"100%|██████████| 13/13 [00:00<00:00, 42.65it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 2: {'accuracy': 0.8676470588235294, 'f1': 0.9075342465753424}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:05<00:00, 20.56it/s]\n",
|
||||
"100%|██████████| 13/13 [00:00<00:00, 42.11it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 3: {'accuracy': 0.8504901960784313, 'f1': 0.8988391376451078}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:05<00:00, 20.50it/s]\n",
|
||||
"100%|██████████| 13/13 [00:00<00:00, 43.15it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 4: {'accuracy': 0.8725490196078431, 'f1': 0.9103448275862069}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.to(device)\n",
|
||||
"for epoch in range(num_epochs):\n",
|
||||
" model.train()\n",
|
||||
" for step, batch in enumerate(tqdm(train_dataloader)):\n",
|
||||
" batch.to(device)\n",
|
||||
" outputs = model(**batch)\n",
|
||||
" loss = outputs.loss\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()\n",
|
||||
" lr_scheduler.step()\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
"\n",
|
||||
" model.eval()\n",
|
||||
" for step, batch in enumerate(tqdm(eval_dataloader)):\n",
|
||||
" batch.to(device)\n",
|
||||
" with torch.no_grad():\n",
|
||||
" outputs = model(**batch)\n",
|
||||
" predictions = outputs.logits.argmax(dim=-1)\n",
|
||||
" predictions, references = predictions, batch[\"labels\"]\n",
|
||||
" metric.add_batch(\n",
|
||||
" predictions=predictions,\n",
|
||||
" references=references,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" eval_metric = metric.compute()\n",
|
||||
" print(f\"epoch {epoch}:\", eval_metric)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f2b2caca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Share adapters on the 🤗 Hub"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "7b23af6f-cf6e-486f-9d10-0eada95b631f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"account_id = ... # your Hugging Face Hub account ID"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "990b3c93",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/zgaoat/anaconda3/envs/pr2/lib/python3.11/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"CommitInfo(commit_url='https://huggingface.co/zgaoat/roberta-base-mrpc-peft-fourierft/commit/064eb35cbb7a1073b4d8fafbeccee43a0a4e37c9', commit_message='Upload model', commit_description='', oid='064eb35cbb7a1073b4d8fafbeccee43a0a4e37c9', pr_url=None, pr_revision=None, pr_num=None)"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.push_to_hub(f\"{account_id}/roberta-base-mrpc-peft-fourierft\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9d140b26",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load adapters from the Hub\n",
|
||||
"\n",
|
||||
"You can also directly load adapters from the Hub using the commands below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "c283e028-b349-46b0-a20e-cde0ee5fbd7b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from peft import PeftModel, PeftConfig\n",
|
||||
"from transformers import AutoTokenizer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "320b10a0-4ea8-4786-9f3c-4670019c6b18",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at roberta-base and are newly initialized: ['classifier.dense.bias', 'classifier.dense.weight', 'classifier.out_proj.bias', 'classifier.out_proj.weight']\n",
|
||||
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"peft_model_id = f\"{account_id}/roberta-base-mrpc-peft-fourierft\"\n",
|
||||
"config = PeftConfig.from_pretrained(peft_model_id)\n",
|
||||
"inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "b3a94049-bc01-4f2e-8cf9-66daf24a4402",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load the FourierFT model\n",
|
||||
"inference_model = PeftModel.from_pretrained(inference_model, peft_model_id, config=config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "bd919fef-4e9a-4dc5-a957-7b879cfc5d38",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" 0%| | 0/13 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n",
|
||||
"100%|██████████| 13/13 [00:00<00:00, 43.06it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'accuracy': 0.8725490196078431, 'f1': 0.9103448275862069}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"inference_model.to(device)\n",
|
||||
"inference_model.eval()\n",
|
||||
"for step, batch in enumerate(tqdm(eval_dataloader)):\n",
|
||||
" batch.to(device)\n",
|
||||
" with torch.no_grad():\n",
|
||||
" outputs = inference_model(**batch)\n",
|
||||
" predictions = outputs.logits.argmax(dim=-1)\n",
|
||||
" predictions, references = predictions, batch[\"labels\"]\n",
|
||||
" metric.add_batch(\n",
|
||||
" predictions=predictions,\n",
|
||||
" references=references,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"eval_metric = metric.compute()\n",
|
||||
"print(eval_metric)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
787
examples/sequence_classification/VBLoRA.ipynb
Normal file
787
examples/sequence_classification/VBLoRA.ipynb
Normal file
@ -0,0 +1,787 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d36e1e93-ae93-4a4e-93c6-68fd868d2882",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Using VB-LoRA for sequence classification"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ddfc0610-55f6-4343-a950-125ccf0f45ac",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In this example, we fine-tune Roberta on a sequence classification task using VB-LoRA.\n",
|
||||
"\n",
|
||||
"This notebook is adapted from `examples/sequence_classification/VeRA.ipynb`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "45addd81-d4f3-4dfd-960d-3920d347f0a6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "a9935ae2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from torch.optim import AdamW\n",
|
||||
"from torch.utils.data import DataLoader\n",
|
||||
"from peft import (\n",
|
||||
" get_peft_model,\n",
|
||||
" VBLoRAConfig,\n",
|
||||
" PeftType,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"import evaluate\n",
|
||||
"from datasets import load_dataset\n",
|
||||
"from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup\n",
|
||||
"from tqdm import tqdm"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "62c959bf-7cc2-49e0-b97e-4c10ec3b9bf3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Parameters"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "e3b13308",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<torch._C.Generator at 0x7f4fc7c3c750>"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"batch_size = 32\n",
|
||||
"model_name_or_path = \"roberta-large\"\n",
|
||||
"task = \"mrpc\"\n",
|
||||
"peft_type = PeftType.VBLORA\n",
|
||||
"device = \"cuda\"\n",
|
||||
"num_epochs = 20\n",
|
||||
"rank = 4\n",
|
||||
"max_length = 128\n",
|
||||
"num_vectors = 90\n",
|
||||
"vector_length = 256\n",
|
||||
"torch.manual_seed(0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "0526f571",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"peft_config = VBLoRAConfig(\n",
|
||||
" task_type=\"SEQ_CLS\", \n",
|
||||
" r=rank,\n",
|
||||
" topk=2,\n",
|
||||
" target_modules=['key', 'value', 'query', 'output.dense', 'intermediate.dense'],\n",
|
||||
" num_vectors=num_vectors,\n",
|
||||
" vector_length=vector_length,\n",
|
||||
" save_only_topk_weights=True, # Set to True to reduce storage space. Note that the saved parameters cannot be used to resume training from checkpoints.\n",
|
||||
" vblora_dropout=0.,\n",
|
||||
")\n",
|
||||
"head_lr = 4e-3\n",
|
||||
"vector_bank_lr = 1e-3\n",
|
||||
"logits_lr = 1e-2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c075c5d2-a457-4f37-a7f1-94fd0d277972",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Loading data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "7bb52cb4-d1c3-4b04-8bf0-f39ca88af139",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"if any(k in model_name_or_path for k in (\"gpt\", \"opt\", \"bloom\")):\n",
|
||||
" padding_side = \"left\"\n",
|
||||
"else:\n",
|
||||
" padding_side = \"right\"\n",
|
||||
"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)\n",
|
||||
"if getattr(tokenizer, \"pad_token_id\") is None:\n",
|
||||
" tokenizer.pad_token_id = tokenizer.eos_token_id"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "e69c5e1f-d27b-4264-a41e-fc9b99d025e6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"datasets = load_dataset(\"glue\", task)\n",
|
||||
"metric = evaluate.load(\"glue\", task)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "0209f778-c93b-40eb-a4e0-24c25db03980",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def tokenize_function(examples):\n",
|
||||
" # max_length=None => use the model max length (it's actually the default)\n",
|
||||
" outputs = tokenizer(examples[\"sentence1\"], examples[\"sentence2\"], truncation=True, max_length=max_length)\n",
|
||||
" return outputs\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"tokenized_datasets = datasets.map(\n",
|
||||
" tokenize_function,\n",
|
||||
" batched=True,\n",
|
||||
" remove_columns=[\"idx\", \"sentence1\", \"sentence2\"],\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n",
|
||||
"# transformers library\n",
|
||||
"tokenized_datasets = tokenized_datasets.rename_column(\"label\", \"labels\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "7453954e-982c-46f0-b09c-589776e6d6cb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def collate_fn(examples):\n",
|
||||
" return tokenizer.pad(examples, padding=\"longest\", return_tensors=\"pt\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Instantiate dataloaders.\n",
|
||||
"train_dataloader = DataLoader(tokenized_datasets[\"train\"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)\n",
|
||||
"eval_dataloader = DataLoader(\n",
|
||||
" tokenized_datasets[\"validation\"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f3b9b2e8-f415-4d0f-9fb4-436f1a3585ea",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Preparing the VB-LoRA model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "2ed5ac74",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at roberta-large and are newly initialized: ['classifier.dense.bias', 'classifier.dense.weight', 'classifier.out_proj.bias', 'classifier.out_proj.weight']\n",
|
||||
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"trainable params: 1,696,770 || all params: 357,058,564 || trainable%: 0.4752\n",
|
||||
"VB-LoRA params to-be-saved (float32-equivalent): 33,408 || total params to-be-saved: 1,085,058\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True, max_length=None)\n",
|
||||
"model = get_peft_model(model, peft_config)\n",
|
||||
"model.print_trainable_parameters()\n",
|
||||
"model.print_savable_parameters()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "0d2d0381",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS\n",
|
||||
"from transformers.trainer_pt_utils import get_parameter_names\n",
|
||||
"\n",
|
||||
"decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)\n",
|
||||
"decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n",
|
||||
"vector_bank_parameters = [name for name, _ in model.named_parameters() if \"vector_bank\" in name]\n",
|
||||
"logits_parameters = [name for name, _ in model.named_parameters() if \"logits\" in name ]\n",
|
||||
"\n",
|
||||
"optimizer_grouped_parameters = [\n",
|
||||
" {\n",
|
||||
" \"params\": [p for n, p in model.named_parameters() if n in decay_parameters and \\\n",
|
||||
" n not in logits_parameters and n not in vector_bank_parameters],\n",
|
||||
" \"weight_decay\": 0.1,\n",
|
||||
" \"lr\": head_lr,\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"params\": [p for n, p in model.named_parameters() if n not in decay_parameters and \\\n",
|
||||
" n not in logits_parameters and n not in vector_bank_parameters],\n",
|
||||
" \"weight_decay\": 0.0,\n",
|
||||
" \"lr\": head_lr,\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"params\": [p for n, p in model.named_parameters() if n in vector_bank_parameters],\n",
|
||||
" \"lr\": vector_bank_lr,\n",
|
||||
" \"weight_decay\": 0.0,\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"params\": [p for n, p in model.named_parameters() if n in logits_parameters],\n",
|
||||
" \"lr\": logits_lr,\n",
|
||||
" \"weight_decay\": 0.0,\n",
|
||||
" },\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"optimizer = AdamW(optimizer_grouped_parameters)\n",
|
||||
"lr_scheduler = get_linear_schedule_with_warmup(\n",
|
||||
" optimizer=optimizer,\n",
|
||||
" num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs),\n",
|
||||
" num_training_steps=(len(train_dataloader) * num_epochs),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c0dd5aa8-977b-4ac0-8b96-884b17bcdd00",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Training"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "fa0e73be",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" 0%| | 0/115 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n",
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.33it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.84it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 0: {'accuracy': 0.6691176470588235, 'f1': 0.786053882725832}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.37it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.83it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 1: {'accuracy': 0.5833333333333334, 'f1': 0.6136363636363636}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.34it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.82it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 2: {'accuracy': 0.7107843137254902, 'f1': 0.8238805970149253}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.34it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.80it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 3: {'accuracy': 0.8284313725490197, 'f1': 0.8833333333333333}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.34it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.79it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 4: {'accuracy': 0.8480392156862745, 'f1': 0.8847583643122676}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.30it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.78it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 5: {'accuracy': 0.8676470588235294, 'f1': 0.898876404494382}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.31it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.76it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 6: {'accuracy': 0.8602941176470589, 'f1': 0.9035532994923858}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.32it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.76it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 7: {'accuracy': 0.8774509803921569, 'f1': 0.911660777385159}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.33it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.79it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 8: {'accuracy': 0.8872549019607843, 'f1': 0.9172661870503597}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.32it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.78it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 9: {'accuracy': 0.875, 'f1': 0.9113043478260869}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.32it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.76it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 10: {'accuracy': 0.8823529411764706, 'f1': 0.9166666666666666}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.33it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.76it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 11: {'accuracy': 0.8970588235294118, 'f1': 0.9252669039145908}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.32it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.75it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 12: {'accuracy': 0.8946078431372549, 'f1': 0.9246935201401051}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.33it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.76it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 13: {'accuracy': 0.9068627450980392, 'f1': 0.9316546762589928}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.33it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.76it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 14: {'accuracy': 0.8946078431372549, 'f1': 0.9225225225225225}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.33it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.76it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 15: {'accuracy': 0.8995098039215687, 'f1': 0.926391382405745}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.30it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.76it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 16: {'accuracy': 0.9068627450980392, 'f1': 0.9316546762589928}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.31it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.77it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 17: {'accuracy': 0.8921568627450981, 'f1': 0.9217081850533808}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.33it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.77it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 18: {'accuracy': 0.8995098039215687, 'f1': 0.9266547406082289}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|██████████| 115/115 [00:34<00:00, 3.33it/s]\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.77it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 19: {'accuracy': 0.9044117647058824, 'f1': 0.9297297297297298}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.to(device)\n",
|
||||
"\n",
|
||||
"for epoch in range(num_epochs):\n",
|
||||
" model.train()\n",
|
||||
" for step, batch in enumerate(tqdm(train_dataloader)):\n",
|
||||
" batch.to(device)\n",
|
||||
" outputs = model(**batch)\n",
|
||||
" loss = outputs.loss\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()\n",
|
||||
" lr_scheduler.step()\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
"\n",
|
||||
" model.eval()\n",
|
||||
" for step, batch in enumerate(tqdm(eval_dataloader)):\n",
|
||||
" batch.to(device)\n",
|
||||
" with torch.no_grad():\n",
|
||||
" outputs = model(**batch)\n",
|
||||
" predictions = outputs.logits.argmax(dim=-1)\n",
|
||||
" predictions, references = predictions, batch[\"labels\"]\n",
|
||||
" metric.add_batch(\n",
|
||||
" predictions=predictions,\n",
|
||||
" references=references,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" eval_metric = metric.compute()\n",
|
||||
" print(f\"epoch {epoch}:\", eval_metric)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f2b2caca",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Share adapters on the 🤗 Hub"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "7b23af6f-cf6e-486f-9d10-0eada95b631f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"account_id = ... # your Hugging Face Hub account ID"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "990b3c93",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model.push_to_hub(f\"{account_id}/roberta-large-peft-vblora\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9d140b26",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load adapters from the Hub\n",
|
||||
"\n",
|
||||
"You can also directly load adapters from the Hub using the commands below:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "c283e028-b349-46b0-a20e-cde0ee5fbd7b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from peft import PeftModel, PeftConfig\n",
|
||||
"from transformers import AutoTokenizer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "320b10a0-4ea8-4786-9f3c-4670019c6b18",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Some weights of RobertaForSequenceClassification were not initialized from the model checkpoint at roberta-large and are newly initialized: ['classifier.dense.bias', 'classifier.dense.weight', 'classifier.out_proj.bias', 'classifier.out_proj.weight']\n",
|
||||
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"peft_model_id = f\"{account_id}/roberta-large-peft-vblora\"\n",
|
||||
"config = PeftConfig.from_pretrained(peft_model_id)\n",
|
||||
"inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "b3a94049-bc01-4f2e-8cf9-66daf24a4402",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Load the model\n",
|
||||
"inference_model = PeftModel.from_pretrained(inference_model, peft_model_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "bd919fef-4e9a-4dc5-a957-7b879cfc5d38",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" 0%| | 0/13 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n",
|
||||
"100%|██████████| 13/13 [00:01<00:00, 7.81it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"{'accuracy': 0.9044117647058824, 'f1': 0.9297297297297298}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"inference_model.to(device)\n",
|
||||
"inference_model.eval()\n",
|
||||
"for step, batch in enumerate(tqdm(eval_dataloader)):\n",
|
||||
" batch.to(device)\n",
|
||||
" with torch.no_grad():\n",
|
||||
" outputs = inference_model(**batch)\n",
|
||||
" predictions = outputs.logits.argmax(dim=-1)\n",
|
||||
" predictions, references = predictions, batch[\"labels\"]\n",
|
||||
" metric.add_batch(\n",
|
||||
" predictions=predictions,\n",
|
||||
" references=references,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"eval_metric = metric.compute()\n",
|
||||
"print(eval_metric)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.14"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -94,7 +94,7 @@
|
||||
" task_type=\"SEQ_CLS\", \n",
|
||||
" r=rank,\n",
|
||||
" d_initial=0.1,\n",
|
||||
" target_modules=[\"query\", \"value\"],\n",
|
||||
" target_modules=[\"query\", \"value\", \"intermediate.dense\"],\n",
|
||||
" save_projection=True,\n",
|
||||
")\n",
|
||||
"head_lr = 1e-2\n",
|
||||
@ -205,7 +205,7 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"trainable params: 610,754 || all params: 125,257,924 || trainable%: 0.48759709605278145\n"
|
||||
"trainable params: 647,714 || all params: 125,294,884 || trainable%: 0.5170\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@ -255,76 +255,76 @@
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
" 0%| | 0/29 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n",
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:23<00:00, 1.24it/s]\n",
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:01<00:00, 2.33it/s]\n"
|
||||
" 0%| | 0/29 [00:00<?, ?it/s]You're using a RobertaTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\n",
|
||||
"100%|██████████| 29/29 [00:18<00:00, 1.58it/s]\n",
|
||||
"100%|██████████| 4/4 [00:01<00:00, 3.52it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 0: {'accuracy': 0.7132352941176471, 'f1': 0.823529411764706}\n"
|
||||
"epoch 0: {'accuracy': 0.7475490196078431, 'f1': 0.8367670364500792}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:23<00:00, 1.26it/s]\n",
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:01<00:00, 2.30it/s]\n"
|
||||
"100%|██████████| 29/29 [00:17<00:00, 1.68it/s]\n",
|
||||
"100%|██████████| 4/4 [00:01<00:00, 3.37it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 1: {'accuracy': 0.7671568627450981, 'f1': 0.8484848484848485}\n"
|
||||
"epoch 1: {'accuracy': 0.7671568627450981, 'f1': 0.8536209553158706}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:23<00:00, 1.24it/s]\n",
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:01<00:00, 2.30it/s]\n"
|
||||
"100%|██████████| 29/29 [00:17<00:00, 1.66it/s]\n",
|
||||
"100%|██████████| 4/4 [00:01<00:00, 3.33it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 2: {'accuracy': 0.8259803921568627, 'f1': 0.8738898756660745}\n"
|
||||
"epoch 2: {'accuracy': 0.8553921568627451, 'f1': 0.8959435626102292}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:23<00:00, 1.25it/s]\n",
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:01<00:00, 2.41it/s]\n"
|
||||
"100%|██████████| 29/29 [00:17<00:00, 1.64it/s]\n",
|
||||
"100%|██████████| 4/4 [00:01<00:00, 3.35it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 3: {'accuracy': 0.8431372549019608, 'f1': 0.891156462585034}\n"
|
||||
"epoch 3: {'accuracy': 0.8823529411764706, 'f1': 0.9133574007220215}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:23<00:00, 1.25it/s]\n",
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:01<00:00, 2.35it/s]"
|
||||
"100%|██████████| 29/29 [00:17<00:00, 1.63it/s]\n",
|
||||
"100%|██████████| 4/4 [00:01<00:00, 3.17it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch 4: {'accuracy': 0.8480392156862745, 'f1': 0.8938356164383561}\n"
|
||||
"epoch 4: {'accuracy': 0.8897058823529411, 'f1': 0.9183303085299456}\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -520,18 +520,6 @@
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.11"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
|
||||
|
@ -11,7 +11,7 @@ python train.py \
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
|
@ -11,7 +11,7 @@ accelerate launch --config_file "configs/deepspeed_config.yaml" train.py \
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
|
@ -11,7 +11,7 @@ accelerate launch --config_file "configs/fsdp_config.yaml" train.py \
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
|
@ -11,7 +11,7 @@ torchrun --nproc_per_node 8 --nnodes 1 train.py \
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
@ -28,7 +28,7 @@ torchrun --nproc_per_node 8 --nnodes 1 train.py \
|
||||
--per_device_eval_batch_size 8 \
|
||||
--gradient_accumulation_steps 8 \
|
||||
--gradient_checkpointing True \
|
||||
--use_reentrant False \
|
||||
--use_reentrant False \
|
||||
--dataset_text_field "content" \
|
||||
--use_peft_lora True \
|
||||
--lora_r 8 \
|
||||
|
@ -11,7 +11,7 @@ accelerate launch --config_file "configs/deepspeed_config_z3_qlora.yaml" train.
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
|
@ -11,7 +11,7 @@ accelerate launch --config_file "configs/fsdp_config_qlora.yaml" train.py \
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
|
@ -11,7 +11,7 @@ python train.py \
|
||||
--logging_steps 5 \
|
||||
--log_level "info" \
|
||||
--logging_strategy "steps" \
|
||||
--evaluation_strategy "epoch" \
|
||||
--eval_strategy "epoch" \
|
||||
--save_strategy "epoch" \
|
||||
--push_to_hub \
|
||||
--hub_private_repo True \
|
||||
|
@ -137,7 +137,8 @@ def main(model_args, data_args, training_args):
|
||||
max_seq_length=data_args.max_seq_length,
|
||||
)
|
||||
trainer.accelerator.print(f"{trainer.model}")
|
||||
trainer.model.print_trainable_parameters()
|
||||
if hasattr(trainer.model, "print_trainable_parameters"):
|
||||
trainer.model.print_trainable_parameters()
|
||||
|
||||
# train
|
||||
checkpoint = None
|
||||
|
15
examples/xlora/README.md
Normal file
15
examples/xlora/README.md
Normal file
@ -0,0 +1,15 @@
|
||||
# X-LoRA examples
|
||||
|
||||
## `xlora_inference_mistralrs.py`
|
||||
|
||||
Perform inference of an X-LoRA model using the inference engine mistral.rs.
|
||||
|
||||
Mistral.rs supports many base models besides Mistral, and can load models directly from saved LoRA checkpoints. Check out [adapter model docs](https://github.com/EricLBuehler/mistral.rs/blob/master/docs/ADAPTER_MODELS.md) and the [models support matrix](https://github.com/EricLBuehler/mistral.rs?tab=readme-ov-file#support-matrix).
|
||||
|
||||
Mistral.rs features X-LoRA support and incorporates techniques such as a dual-KV cache, continuous batching, Paged Attention, and optional non granular scalings, will allow vastly improved throughput.
|
||||
|
||||
Links:
|
||||
|
||||
- Installation: https://github.com/EricLBuehler/mistral.rs/blob/master/mistralrs-pyo3/README.md
|
||||
- Runnable example: https://github.com/EricLBuehler/mistral.rs/blob/master/examples/python/xlora_zephyr.py
|
||||
- Adapter model docs and making the ordering file: https://github.com/EricLBuehler/mistral.rs/blob/master/docs/ADAPTER_MODELS.md
|
25
examples/xlora/xlora_inference_mistralrs.py
Normal file
25
examples/xlora/xlora_inference_mistralrs.py
Normal file
@ -0,0 +1,25 @@
|
||||
from mistralrs import ChatCompletionRequest, Runner, Which
|
||||
|
||||
|
||||
runner = Runner(
|
||||
which=Which.XLora(
|
||||
tok_model_id=None, # Automatically determine from ordering file
|
||||
model_id=..., # Model ID of the base model (local path of HF model ID)
|
||||
xlora_model_id=..., # X-LoRA Model ID of the base model (local path of HF model ID)
|
||||
order=..., # Ordering file to ensure compatability with PEFT
|
||||
tgt_non_granular_index=3, # Only generate scalings for the first 3 decoding tokens, and then use the last generated one
|
||||
)
|
||||
)
|
||||
|
||||
res = runner.send_chat_completion_request(
|
||||
ChatCompletionRequest(
|
||||
model="mistral",
|
||||
messages=[{"role": "user", "content": "Tell me a story about 2 low rank matrices."}],
|
||||
max_tokens=256,
|
||||
presence_penalty=1.0,
|
||||
top_p=0.1,
|
||||
temperature=0.5,
|
||||
)
|
||||
)
|
||||
print(res.choices[0].message.content)
|
||||
print(res.usage)
|
@ -6,6 +6,7 @@ target-version = ['py38']
|
||||
[tool.ruff]
|
||||
target-version = "py38"
|
||||
line-length = 119
|
||||
extend-exclude = ["*.ipynb"]
|
||||
|
||||
[tool.ruff.lint]
|
||||
extend-select = [
|
||||
|
6
setup.py
6
setup.py
@ -15,13 +15,13 @@
|
||||
from setuptools import find_packages, setup
|
||||
|
||||
|
||||
VERSION = "0.10.1.dev0"
|
||||
VERSION = "0.13.1"
|
||||
|
||||
extras = {}
|
||||
extras["quality"] = [
|
||||
"black", # doc-builder has an implicit dependency on Black, see huggingface/doc-builder#434
|
||||
"hf-doc-builder",
|
||||
"ruff~=0.2.1",
|
||||
"ruff~=0.6.1",
|
||||
]
|
||||
extras["docs_specific"] = [
|
||||
"black", # doc-builder has an implicit dependency on Black, see huggingface/doc-builder#434
|
||||
@ -48,7 +48,7 @@ setup(
|
||||
keywords="deep learning",
|
||||
license="Apache",
|
||||
author="The HuggingFace team",
|
||||
author_email="sourab@huggingface.co",
|
||||
author_email="benjamin@huggingface.co",
|
||||
url="https://github.com/huggingface/peft",
|
||||
package_dir={"": "src"},
|
||||
packages=find_packages("src"),
|
||||
|
@ -17,7 +17,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__version__ = "0.10.1.dev0"
|
||||
__version__ = "0.13.1"
|
||||
|
||||
from .auto import (
|
||||
AutoPeftModel,
|
||||
@ -44,11 +44,14 @@ from .peft_model import (
|
||||
PeftModelForTokenClassification,
|
||||
PeftModelForQuestionAnswering,
|
||||
PeftModelForFeatureExtraction,
|
||||
get_layer_status,
|
||||
get_model_status,
|
||||
)
|
||||
from .tuners import (
|
||||
AdaptionPromptConfig,
|
||||
AdaptionPromptModel,
|
||||
LoraConfig,
|
||||
LoraRuntimeConfig,
|
||||
LoftQConfig,
|
||||
LoraModel,
|
||||
LoHaConfig,
|
||||
@ -75,8 +78,19 @@ from .tuners import (
|
||||
OFTModel,
|
||||
PolyConfig,
|
||||
PolyModel,
|
||||
LNTuningConfig,
|
||||
LNTuningModel,
|
||||
VBLoRAConfig,
|
||||
VBLoRAModel,
|
||||
VeraConfig,
|
||||
VeraModel,
|
||||
FourierFTConfig,
|
||||
FourierFTModel,
|
||||
XLoraConfig,
|
||||
XLoraModel,
|
||||
HRAConfig,
|
||||
HRAModel,
|
||||
VBLoRAConfig,
|
||||
)
|
||||
from .utils import (
|
||||
TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
|
||||
|
@ -62,6 +62,7 @@ class _BaseAutoPeftModel:
|
||||
adapter_name: str = "default",
|
||||
is_trainable: bool = False,
|
||||
config: Optional[PeftConfig] = None,
|
||||
revision: Optional[str] = None,
|
||||
**kwargs,
|
||||
):
|
||||
r"""
|
||||
@ -69,8 +70,9 @@ class _BaseAutoPeftModel:
|
||||
are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and
|
||||
the config object init.
|
||||
"""
|
||||
peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
|
||||
peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, revision=revision, **kwargs)
|
||||
base_model_path = peft_config.base_model_name_or_path
|
||||
base_model_revision = peft_config.revision
|
||||
|
||||
task_type = getattr(peft_config, "task_type", None)
|
||||
|
||||
@ -101,7 +103,7 @@ class _BaseAutoPeftModel:
|
||||
"Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type."
|
||||
)
|
||||
|
||||
base_model = target_class.from_pretrained(base_model_path, **kwargs)
|
||||
base_model = target_class.from_pretrained(base_model_path, revision=base_model_revision, **kwargs)
|
||||
|
||||
tokenizer_exists = False
|
||||
if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)):
|
||||
@ -114,7 +116,7 @@ class _BaseAutoPeftModel:
|
||||
tokenizer_exists = check_file_exists_on_hf_hub(
|
||||
repo_id=pretrained_model_name_or_path,
|
||||
filename=TOKENIZER_CONFIG_NAME,
|
||||
revision=kwargs.get("revision", None),
|
||||
revision=revision,
|
||||
repo_type=kwargs.get("repo_type", None),
|
||||
token=token,
|
||||
)
|
||||
|
@ -14,6 +14,7 @@
|
||||
import inspect
|
||||
import json
|
||||
import os
|
||||
import warnings
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from typing import Dict, Optional, Union
|
||||
|
||||
@ -63,7 +64,7 @@ class PeftConfigMixin(PushToHubMixin):
|
||||
os.makedirs(save_directory, exist_ok=True)
|
||||
auto_mapping_dict = kwargs.pop("auto_mapping_dict", None)
|
||||
|
||||
output_dict = asdict(self)
|
||||
output_dict = self.to_dict()
|
||||
# converting set type to list
|
||||
for key, value in output_dict.items():
|
||||
if isinstance(value, set):
|
||||
@ -97,7 +98,7 @@ class PeftConfigMixin(PushToHubMixin):
|
||||
# TODO: this hack is needed to fix the following issue (on commit 702f937):
|
||||
# if someone saves a default config and loads it back with `PeftConfig` class it yields to
|
||||
# not loading the correct config class.
|
||||
|
||||
#
|
||||
# from peft import AdaLoraConfig, PeftConfig
|
||||
# peft_config = AdaLoraConfig()
|
||||
# print(peft_config)
|
||||
@ -162,6 +163,13 @@ class PeftConfigMixin(PushToHubMixin):
|
||||
with open(path_json_file) as file:
|
||||
json_object = json.load(file)
|
||||
|
||||
# Sanity check that config does not contain a runtime_config
|
||||
if "runtime_config" in json_object:
|
||||
warnings.warn(
|
||||
"The configuration file contains a `runtime_config` key. This is ignored. Runtime configurations are only valid at runtime."
|
||||
)
|
||||
del json_object["runtime_config"]
|
||||
|
||||
return json_object
|
||||
|
||||
@classmethod
|
||||
@ -232,7 +240,7 @@ class PeftConfig(PeftConfigMixin):
|
||||
base_model_name_or_path: Optional[str] = field(
|
||||
default=None, metadata={"help": "The name of the base model to use."}
|
||||
)
|
||||
revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."})
|
||||
revision: Optional[str] = field(default=None, metadata={"help": "The specific base model version to use."})
|
||||
peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"})
|
||||
task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"})
|
||||
inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"})
|
||||
|
@ -1,16 +1,32 @@
|
||||
# Copyright 2023-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
from contextlib import contextmanager
|
||||
from copy import deepcopy
|
||||
from functools import update_wrapper
|
||||
from types import MethodType
|
||||
|
||||
from .peft_model import PeftModel
|
||||
from .peft_model import PeftConfig, PeftModel
|
||||
from .tuners.lora.layer import LoraLayer
|
||||
|
||||
|
||||
def update_forward_signature(model: PeftModel) -> None:
|
||||
"""
|
||||
Args:
|
||||
Updates the forward signature of the PeftModel to include parents class signature
|
||||
model (`PeftModel`): Peft model to update the forward signature
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
@ -41,9 +57,9 @@ def update_forward_signature(model: PeftModel) -> None:
|
||||
|
||||
def update_generate_signature(model: PeftModel) -> None:
|
||||
"""
|
||||
Args:
|
||||
Updates the generate signature of a PeftModel with overriding generate to include parents class signature
|
||||
model (`PeftModel`): Peft model to update the generate signature
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
@ -81,12 +97,12 @@ def update_generate_signature(model: PeftModel) -> None:
|
||||
|
||||
def update_signature(model: PeftModel, method: str = "all") -> None:
|
||||
"""
|
||||
Args:
|
||||
Updates the signature of a PeftModel include parents class signature for forward or generate method
|
||||
model (`PeftModel`): Peft model to update generate or forward signature method (`str`): method to update
|
||||
signature choose one of "forward", "generate", "all"
|
||||
|
||||
Example:
|
||||
```python
|
||||
```python
|
||||
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
|
||||
>>> from peft import get_peft_model, LoraConfig, TaskType, update_signature
|
||||
|
||||
@ -111,3 +127,84 @@ def update_signature(model: PeftModel, method: str = "all") -> None:
|
||||
update_generate_signature(model)
|
||||
else:
|
||||
raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']")
|
||||
|
||||
|
||||
def check_if_peft_model(model_name_or_path: str) -> bool:
|
||||
"""
|
||||
Check if the model is a PEFT model.
|
||||
|
||||
Args:
|
||||
model_name_or_path (`str`):
|
||||
Model id to check, can be local or on the Hugging Face Hub.
|
||||
|
||||
Returns:
|
||||
`bool`: True if the model is a PEFT model, False otherwise.
|
||||
"""
|
||||
is_peft_model = True
|
||||
try:
|
||||
PeftConfig.from_pretrained(model_name_or_path)
|
||||
except Exception:
|
||||
# allow broad exceptions so that this works even if new exceptions are added on HF Hub side
|
||||
is_peft_model = False
|
||||
|
||||
return is_peft_model
|
||||
|
||||
|
||||
@contextmanager
|
||||
def rescale_adapter_scale(model, multiplier):
|
||||
"""
|
||||
Context manager to temporarily rescale the scaling of the LoRA adapter in a model.
|
||||
|
||||
The original scaling values are restored when the context manager exits. This context manager works with the
|
||||
transformers and diffusers models that have directly loaded LoRA adapters.
|
||||
|
||||
For LoRA, applying this context manager with multiplier in [0, 1] is strictly equivalent to applying
|
||||
[wise-ft](https://arxiv.org/abs/2109.01903) (see [#1940](https://github.com/huggingface/peft/issues/1940) for
|
||||
details). It can improve the performances of the model if there is a distribution shiftbetween the training data
|
||||
used for fine-tuning, and the test data used during inference.
|
||||
|
||||
Warning: It has been reported that when using Apple's MPS backend for PyTorch, it is necessary to add a short sleep
|
||||
time after exiting the context before the scales are fully restored.
|
||||
|
||||
Args:
|
||||
model: The model containing `LoraLayer` modules whose scaling is to be adjusted.
|
||||
multiplier (float or int): The multiplier that rescales the `scaling` attribute. Must be of type float or int.
|
||||
|
||||
Raises:
|
||||
ValueError: If the model does not contain any `LoraLayer`
|
||||
instances, indicating that the model does not support scaling.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
>>> model = ModelWithLoraLayer()
|
||||
>>> multiplier = 0.5
|
||||
>>> with rescale_adapter_scale(model, multiplier):
|
||||
... outputs = model(**inputs) # Perform operations with the scaled model
|
||||
>>> outputs = model(**inputs) # The original scaling values are restored here
|
||||
```
|
||||
"""
|
||||
# check if multiplier has a valid data type
|
||||
if not isinstance(multiplier, (float, int)):
|
||||
raise TypeError(f"Argument multiplier should be of type float, got {type(multiplier)}")
|
||||
|
||||
# iterate on the model's modules and grab the original scaling attribute
|
||||
# from the lora layers if present
|
||||
original_scaling = {}
|
||||
for module in model.modules():
|
||||
if isinstance(module, LoraLayer):
|
||||
original_scaling[module] = module.scaling.copy()
|
||||
module.scaling = {k: v * multiplier for k, v in module.scaling.items()}
|
||||
|
||||
# check whether scaling is prohibited on model
|
||||
# the original scaling dictionary should be empty
|
||||
# if there were no lora layers
|
||||
if not original_scaling:
|
||||
raise ValueError("scaling is only supported for models with `LoraLayer`s")
|
||||
try:
|
||||
yield
|
||||
|
||||
finally:
|
||||
# restore original scaling values after exiting the context
|
||||
for module, scaling in original_scaling.items():
|
||||
module.scaling = scaling
|
||||
|
@ -77,3 +77,13 @@ def is_aqlm_available():
|
||||
@lru_cache
|
||||
def is_auto_awq_available():
|
||||
return importlib.util.find_spec("awq") is not None
|
||||
|
||||
|
||||
@lru_cache
|
||||
def is_eetq_available():
|
||||
return importlib.util.find_spec("eetq") is not None
|
||||
|
||||
|
||||
@lru_cache
|
||||
def is_hqq_available():
|
||||
return importlib.util.find_spec("hqq") is not None
|
||||
|
@ -14,10 +14,13 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
import warnings
|
||||
from typing import TYPE_CHECKING, Any, Optional
|
||||
|
||||
import torch
|
||||
|
||||
from peft.tuners.xlora.model import XLoraModel
|
||||
|
||||
from .config import PeftConfig
|
||||
from .mixed_model import PeftMixedModel
|
||||
from .peft_model import (
|
||||
@ -35,8 +38,14 @@ from .tuners import (
|
||||
AdaptionPromptConfig,
|
||||
BOFTConfig,
|
||||
BOFTModel,
|
||||
FourierFTConfig,
|
||||
FourierFTModel,
|
||||
HRAConfig,
|
||||
HRAModel,
|
||||
IA3Config,
|
||||
IA3Model,
|
||||
LNTuningConfig,
|
||||
LNTuningModel,
|
||||
LoHaConfig,
|
||||
LoHaModel,
|
||||
LoKrConfig,
|
||||
@ -51,10 +60,13 @@ from .tuners import (
|
||||
PrefixTuningConfig,
|
||||
PromptEncoderConfig,
|
||||
PromptTuningConfig,
|
||||
VBLoRAConfig,
|
||||
VBLoRAModel,
|
||||
VeraConfig,
|
||||
VeraModel,
|
||||
XLoraConfig,
|
||||
)
|
||||
from .tuners.tuners_utils import BaseTuner as _BaseTuner
|
||||
from .tuners.tuners_utils import BaseTuner
|
||||
from .utils import _prepare_prompt_learning_config
|
||||
|
||||
|
||||
@ -78,6 +90,7 @@ PEFT_TYPE_TO_CONFIG_MAPPING: dict[str, type[PeftConfig]] = {
|
||||
"P_TUNING": PromptEncoderConfig,
|
||||
"LORA": LoraConfig,
|
||||
"LOHA": LoHaConfig,
|
||||
"LORAPLUS": LoraConfig,
|
||||
"LOKR": LoKrConfig,
|
||||
"ADALORA": AdaLoraConfig,
|
||||
"BOFT": BOFTConfig,
|
||||
@ -85,10 +98,15 @@ PEFT_TYPE_TO_CONFIG_MAPPING: dict[str, type[PeftConfig]] = {
|
||||
"MULTITASK_PROMPT_TUNING": MultitaskPromptTuningConfig,
|
||||
"OFT": OFTConfig,
|
||||
"POLY": PolyConfig,
|
||||
"LN_TUNING": LNTuningConfig,
|
||||
"VERA": VeraConfig,
|
||||
"FOURIERFT": FourierFTConfig,
|
||||
"XLORA": XLoraConfig,
|
||||
"HRA": HRAConfig,
|
||||
"VBLORA": VBLoRAConfig,
|
||||
}
|
||||
|
||||
PEFT_TYPE_TO_TUNER_MAPPING: dict[str, type[_BaseTuner]] = {
|
||||
PEFT_TYPE_TO_TUNER_MAPPING: dict[str, type[BaseTuner]] = {
|
||||
"LORA": LoraModel,
|
||||
"LOHA": LoHaModel,
|
||||
"LOKR": LoKrModel,
|
||||
@ -97,7 +115,12 @@ PEFT_TYPE_TO_TUNER_MAPPING: dict[str, type[_BaseTuner]] = {
|
||||
"IA3": IA3Model,
|
||||
"OFT": OFTModel,
|
||||
"POLY": PolyModel,
|
||||
"LN_TUNING": LNTuningModel,
|
||||
"VERA": VeraModel,
|
||||
"FOURIERFT": FourierFTModel,
|
||||
"XLORA": XLoraModel,
|
||||
"HRA": HRAModel,
|
||||
"VBLORA": VBLoRAModel,
|
||||
}
|
||||
|
||||
|
||||
@ -113,7 +136,12 @@ def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig:
|
||||
|
||||
|
||||
def get_peft_model(
|
||||
model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False
|
||||
model: PreTrainedModel,
|
||||
peft_config: PeftConfig,
|
||||
adapter_name: str = "default",
|
||||
mixed: bool = False,
|
||||
autocast_adapter_dtype: bool = True,
|
||||
revision: Optional[str] = None,
|
||||
) -> PeftModel | PeftMixedModel:
|
||||
"""
|
||||
Returns a Peft model object from a model and a config.
|
||||
@ -127,26 +155,48 @@ def get_peft_model(
|
||||
The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
|
||||
mixed (`bool`, `optional`, defaults to `False`):
|
||||
Whether to allow mixing different (compatible) adapter types.
|
||||
autocast_adapter_dtype (`bool`, *optional*):
|
||||
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter weights
|
||||
using float16 or bfloat16 to float32, as this is typically required for stable training, and only affect
|
||||
select PEFT tuners.
|
||||
revision (`str`, `optional`, defaults to `main`):
|
||||
The revision of the base model. If this isn't set, the saved peft model will load the `main` revision for
|
||||
the base model
|
||||
"""
|
||||
model_config = getattr(model, "config", {"model_type": "custom"})
|
||||
if hasattr(model_config, "to_dict"):
|
||||
model_config = model_config.to_dict()
|
||||
model_config = BaseTuner.get_model_config(model)
|
||||
old_name = peft_config.base_model_name_or_path
|
||||
new_name = model.__dict__.get("name_or_path", None)
|
||||
peft_config.base_model_name_or_path = new_name
|
||||
|
||||
peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
|
||||
if (old_name is not None) and (old_name != new_name):
|
||||
warnings.warn(
|
||||
f"The PEFT config's `base_model_name_or_path` was renamed from '{old_name}' to '{new_name}'. "
|
||||
"Please ensure that the correct base model is loaded when loading this checkpoint."
|
||||
)
|
||||
|
||||
if revision is not None:
|
||||
if peft_config.revision is not None and peft_config.revision != revision:
|
||||
warnings.warn(
|
||||
f"peft config has already set base model revision to {peft_config.revision}, overwriting with revision {revision}"
|
||||
)
|
||||
peft_config.revision = revision
|
||||
|
||||
if mixed:
|
||||
# note: PeftMixedModel does not support autocast_adapter_dtype, so don't pass it
|
||||
return PeftMixedModel(model, peft_config, adapter_name=adapter_name)
|
||||
|
||||
if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
|
||||
return PeftModel(model, peft_config, adapter_name=adapter_name)
|
||||
return PeftModel(model, peft_config, adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype)
|
||||
|
||||
if peft_config.is_prompt_learning:
|
||||
peft_config = _prepare_prompt_learning_config(peft_config, model_config)
|
||||
return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name)
|
||||
return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](
|
||||
model, peft_config, adapter_name=adapter_name, autocast_adapter_dtype=autocast_adapter_dtype
|
||||
)
|
||||
|
||||
|
||||
def inject_adapter_in_model(
|
||||
peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default"
|
||||
peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default", low_cpu_mem_usage: bool = False
|
||||
) -> torch.nn.Module:
|
||||
r"""
|
||||
A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning
|
||||
@ -160,6 +210,8 @@ def inject_adapter_in_model(
|
||||
The input model where the adapter will be injected.
|
||||
adapter_name (`str`, `optional`, defaults to `"default"`):
|
||||
The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
|
||||
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
|
||||
Create empty adapter weights on meta device. Useful to speed up the loading process.
|
||||
"""
|
||||
if peft_config.is_prompt_learning or peft_config.is_adaption_prompt:
|
||||
raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.")
|
||||
@ -172,6 +224,6 @@ def inject_adapter_in_model(
|
||||
tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type]
|
||||
|
||||
# By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules.
|
||||
peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name)
|
||||
peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name, low_cpu_mem_usage=low_cpu_mem_usage)
|
||||
|
||||
return peft_model.model
|
||||
|
@ -23,7 +23,7 @@ from accelerate.hooks import remove_hook_from_submodules
|
||||
from torch import nn
|
||||
from transformers.utils import PushToHubMixin
|
||||
|
||||
from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES
|
||||
from peft.utils.constants import DUMMY_MODEL_CONFIG
|
||||
|
||||
from .config import PeftConfig
|
||||
from .peft_model import PeftModel
|
||||
@ -36,6 +36,7 @@ from .tuners import (
|
||||
MixedModel,
|
||||
OFTModel,
|
||||
)
|
||||
from .tuners.mixed import COMPATIBLE_TUNER_TYPES
|
||||
from .utils import PeftType, _set_adapter, _set_trainable
|
||||
|
||||
|
||||
@ -97,8 +98,6 @@ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
|
||||
Example:
|
||||
|
||||
```py
|
||||
>>> from peft import get_peft_model
|
||||
|
||||
>>> base_model = ... # load the base model, e.g. from transformers
|
||||
>>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval()
|
||||
>>> peft_model.load_adapter(path_to_adapter2, "adapter2")
|
||||
@ -113,6 +112,8 @@ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
|
||||
The config of the model to be tuned. The adapter type must be compatible.
|
||||
adapter_name (`str`, `optional`, defaults to `"default"`):
|
||||
The name of the first adapter.
|
||||
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
|
||||
Create empty adapter weights on meta device. Useful to speed up the loading process.
|
||||
"""
|
||||
|
||||
def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None:
|
||||
@ -123,7 +124,7 @@ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
|
||||
self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name)
|
||||
self.set_modules_to_save(peft_config, adapter_name)
|
||||
|
||||
self.config = getattr(model, "config", {"model_type": "custom"})
|
||||
self.config = getattr(model, "config", DUMMY_MODEL_CONFIG)
|
||||
|
||||
# the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid
|
||||
# numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected
|
||||
@ -193,6 +194,8 @@ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
|
||||
try:
|
||||
return super().__getattr__(name) # defer to nn.Module's logic
|
||||
except AttributeError:
|
||||
if name == "base_model": # see #1892: prevent infinite recursion if class is not initialized
|
||||
raise
|
||||
return getattr(self.base_model, name)
|
||||
|
||||
def forward(self, *args: Any, **kwargs: Any):
|
||||
@ -218,12 +221,38 @@ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
|
||||
finally:
|
||||
self.base_model.enable_adapter_layers()
|
||||
|
||||
def add_adapter(self, adapter_name: str, peft_config: PeftConfig):
|
||||
def add_adapter(self, adapter_name: str, peft_config: PeftConfig, low_cpu_mem_usage: bool = False) -> None:
|
||||
"""
|
||||
Add an adapter to the model based on the passed configuration.
|
||||
|
||||
This adapter is not trained. To load a trained adapter, check out [`PeftModel.load_adapter`].
|
||||
|
||||
The name for the new adapter should be unique.
|
||||
|
||||
The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
|
||||
adapter.
|
||||
|
||||
Args:
|
||||
adapter_name (`str`):
|
||||
The name of the adapter to be added.
|
||||
peft_config ([`PeftConfig`]):
|
||||
The configuration of the adapter to be added.
|
||||
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
|
||||
Create empty adapter weights on meta device. Useful to speed up the process when loading saved
|
||||
adapters.
|
||||
|
||||
<Tip>
|
||||
|
||||
Don't use `low_cpu_mem_usage=True` when creating a new PEFT adapter for training (training is untested
|
||||
and discouraged for PeftMixedModel in general).
|
||||
|
||||
</Tip>
|
||||
"""
|
||||
_check_config_compatible(peft_config)
|
||||
|
||||
try:
|
||||
self.peft_config[adapter_name] = peft_config
|
||||
self.base_model.inject_adapter(self, adapter_name)
|
||||
self.base_model.inject_adapter(self, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage)
|
||||
except Exception: # something went wrong, roll back
|
||||
if adapter_name in self.peft_config:
|
||||
del self.peft_config[adapter_name]
|
||||
@ -311,11 +340,48 @@ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
|
||||
"""
|
||||
return self.base_model.unload(*args, **kwargs)
|
||||
|
||||
def get_layer_status(self):
|
||||
raise TypeError(f"get_layer_status is not supported for {self.__class__.__name__}.")
|
||||
|
||||
def get_model_status(self):
|
||||
raise TypeError(f"get_model_status is not supported for {self.__class__.__name__}.")
|
||||
|
||||
@classmethod
|
||||
def _split_kwargs(cls, kwargs: dict[str, Any]):
|
||||
return PeftModel._split_kwargs(kwargs)
|
||||
|
||||
def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any):
|
||||
"""
|
||||
Load a trained adapter into the model.
|
||||
|
||||
The name for the new adapter should be unique.
|
||||
|
||||
The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active
|
||||
adapter.
|
||||
|
||||
Args:
|
||||
adapter_name (`str`):
|
||||
The name of the adapter to be added.
|
||||
peft_config ([`PeftConfig`]):
|
||||
The configuration of the adapter to be added.
|
||||
is_trainable (`bool`, *optional*, defaults to `False`):
|
||||
Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
|
||||
used for inference.
|
||||
torch_device (`str`, *optional*, defaults to None):
|
||||
The device to load the adapter on. If `None`, the device will be inferred.
|
||||
autocast_adapter_dtype (`bool`, *optional*, defaults to `True`):
|
||||
Whether to autocast the adapter dtype. Defaults to `True`. Right now, this will only cast adapter
|
||||
weights using float16 and bfloat16 to float32, as this is typically required for stable training, and
|
||||
only affect select PEFT tuners.
|
||||
ephemeral_gpu_offload (`bool`, *optional*, defaults to `False`):
|
||||
Whether to use ephemeral GPU offloading for partially loaded modules. Defaults to `False`.
|
||||
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
|
||||
Create empty adapter weights on meta device before loading the saved weights. Useful to speed up the
|
||||
process.
|
||||
kwargs: (`optional`):
|
||||
Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub.
|
||||
"""
|
||||
# the low_cpu_mem_usage option is handled through kwargs
|
||||
output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs)
|
||||
# TODO: not quite clear why this is necessary but tests fail without it
|
||||
self.set_adapter(self.active_adapters)
|
||||
@ -366,6 +432,9 @@ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
|
||||
The configuration object to use instead of an automatically loaded configuration. This configuration
|
||||
object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already
|
||||
loaded before calling `from_pretrained`.
|
||||
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
|
||||
Create empty adapter weights on meta device before loading the saved weights. Useful to speed up the
|
||||
process.
|
||||
kwargs: (`optional`):
|
||||
Additional keyword arguments passed along to the specific PEFT configuration class.
|
||||
"""
|
||||
@ -405,5 +474,6 @@ class PeftMixedModel(PushToHubMixin, torch.nn.Module):
|
||||
|
||||
# note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel
|
||||
model = cls(model, config, adapter_name)
|
||||
# the low_cpu_mem_usage option is handled through kwargs
|
||||
model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs)
|
||||
return model
|
||||
|
18
src/peft/optimizers/__init__.py
Normal file
18
src/peft/optimizers/__init__.py
Normal file
@ -0,0 +1,18 @@
|
||||
# Copyright 2024-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from .loraplus import create_loraplus_optimizer
|
||||
|
||||
|
||||
__all__ = ["create_loraplus_optimizer"]
|
121
src/peft/optimizers/loraplus.py
Normal file
121
src/peft/optimizers/loraplus.py
Normal file
@ -0,0 +1,121 @@
|
||||
# Copyright 2024-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This module contains the implementation of the LoraPlus optimizer.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from operator import attrgetter
|
||||
|
||||
import torch.nn as nn
|
||||
from torch.optim import Optimizer
|
||||
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
|
||||
from transformers.trainer_pt_utils import get_parameter_names
|
||||
|
||||
from ..peft_model import PeftModel
|
||||
from ..tuners.lora.layer import Embedding
|
||||
|
||||
|
||||
def create_loraplus_optimizer(
    model: PeftModel, optimizer_cls: type[Optimizer], *, lr: float, loraplus_lr_ratio: float, **kwargs
) -> Optimizer:
    """
    Create an optimizer with LoRA+ parameter groups.

    LoRA+ (https://arxiv.org/abs/2402.12354) trains the LoRA `B` matrices with a
    larger learning rate than the `A` matrices (ratio ηB/ηA = `loraplus_lr_ratio`).

    Reference implementation: https://github.com/nikhil-ghosh-berkeley/loraplus/

    Args:
        model (`PeftModel`): The model whose trainable parameters are grouped.
        optimizer_cls (`type[torch.optim.Optimizer]`): The optimizer class to instantiate.
        lr (`float`): The base learning rate (ηA), applied to group A.
        loraplus_lr_ratio (`float`):
            The ratio ηB/ηA; group B uses `lr * loraplus_lr_ratio`. Should be >= 1 and
            larger for harder tasks; in that case a slightly smaller base `lr` than a
            typical vanilla LoRA learning rate tends to work well.
        **kwargs:
            Extra keyword arguments forwarded to the optimizer. Two keys are consumed
            here instead of being forwarded:
            - `loraplus_weight_decay` (`float`, default 0.0): weight decay for the
              decayed groups.
            - `loraplus_lr_embedding` (`float`, default 1e-6): learning rate for LoRA
              embedding layers.

    Returns:
        `torch.optim.Optimizer`: The optimizer with four parameter groups
        (A, embedding, B with decay, B without decay).
    """
    # Names of parameters that should receive weight decay: everything except
    # LayerNorm weights and biases (same convention as the transformers Trainer).
    decay_parameters = [
        name for name in get_parameter_names(model, ALL_LAYERNORM_LAYERS) if "bias" not in name
    ]

    group_a = {}           # lora_A and remaining trainable weights -> lr
    group_b = {}           # lora_B / 1-dim params with decay -> lr * ratio
    group_b_no_decay = {}  # lora_B / 1-dim params without decay -> lr * ratio
    group_embedding = {}   # parameters owned by LoRA Embedding layers

    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue

        owner = attrgetter(name)(model)
        if isinstance(owner, Embedding):
            group_embedding[name] = param
        elif ("lora_B" in name) or (param.ndim == 1):
            bucket = group_b if name in decay_parameters else group_b_no_decay
            bucket[name] = param
        else:
            group_a[name] = param

    kwargs["lr"] = lr
    loraplus_weight_decay = kwargs.pop("loraplus_weight_decay", 0.0)
    loraplus_lr_embedding = kwargs.pop("loraplus_lr_embedding", 1e-6)

    optimizer_grouped_parameters = [
        {
            "params": list(group_a.values()),
            "weight_decay": loraplus_weight_decay,
            "lr": lr,
        },
        {
            "params": list(group_embedding.values()),
            "weight_decay": loraplus_weight_decay,
            "lr": loraplus_lr_embedding,
        },
        {
            "params": list(group_b.values()),
            "weight_decay": loraplus_weight_decay,
            "lr": lr * loraplus_lr_ratio,
        },
        {
            "params": list(group_b_no_decay.values()),
            "weight_decay": 0.0,
            "lr": lr * loraplus_lr_ratio,
        },
    ]

    optimizer = optimizer_cls(optimizer_grouped_parameters, **kwargs)

    # bitsandbytes 8-bit optimizers need embedding weights kept in 32-bit
    # optimizer state for numerical stability.
    if optimizer_cls.__name__ in ("Adam8bit", "AdamW8bit", "PagedAdam8bit", "PagedAdamW8bit"):
        import bitsandbytes

        manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
        for module in model.modules():
            if isinstance(module, nn.Embedding):
                manager.register_module_override(module, "weight", {"optim_bits": 32})
    return optimizer
|
File diff suppressed because it is too large
Load Diff
@ -18,7 +18,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel
|
||||
from .lora import LoraConfig, LoraModel, LoftQConfig
|
||||
from .lora import LoraConfig, LoraModel, LoftQConfig, LoraRuntimeConfig
|
||||
from .loha import LoHaConfig, LoHaModel
|
||||
from .lokr import LoKrConfig, LoKrModel
|
||||
from .ia3 import IA3Config, IA3Model
|
||||
@ -31,4 +31,9 @@ from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTu
|
||||
from .oft import OFTConfig, OFTModel
|
||||
from .mixed import MixedModel
|
||||
from .poly import PolyConfig, PolyModel
|
||||
from .ln_tuning import LNTuningConfig, LNTuningModel
|
||||
from .vera import VeraConfig, VeraModel
|
||||
from .fourierft import FourierFTConfig, FourierFTModel
|
||||
from .xlora import XLoraConfig, XLoraModel
|
||||
from .hra import HRAConfig, HRAModel
|
||||
from .vblora import VBLoRAConfig, VBLoRAModel
|
||||
|
@ -12,6 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import warnings
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional
|
||||
|
||||
@ -50,3 +51,27 @@ class AdaLoraConfig(LoraConfig):
|
||||
|
||||
def __post_init__(self):
|
||||
self.peft_type = PeftType.ADALORA
|
||||
|
||||
if self.use_dora:
|
||||
raise ValueError(f"{self.peft_type} does not support DoRA.")
|
||||
|
||||
if self.loftq_config:
|
||||
raise ValueError(f"{self.peft_type} does not support LOFTQ.")
|
||||
|
||||
self.target_modules = (
|
||||
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
|
||||
)
|
||||
# if target_modules is a regex expression, then layers_to_transform should be None
|
||||
if isinstance(self.target_modules, str) and self.layers_to_transform is not None:
|
||||
raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.")
|
||||
|
||||
# if target_modules is a regex expression, then layers_pattern should be None
|
||||
if isinstance(self.target_modules, str) and self.layers_pattern is not None:
|
||||
raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.")
|
||||
|
||||
# Check if 'r' has been set to a non-default value
|
||||
if self.r != 8: # 8 is the default value for 'r' in LoraConfig
|
||||
warnings.warn(
|
||||
"Note that `r` is not used in AdaLora and will be ignored."
|
||||
"If you intended to set the initial rank, use `init_r` instead."
|
||||
)
|
||||
|
@ -35,7 +35,8 @@ class AdaLoraLayer(LoraLayer):
|
||||
# List all names of layers that may contain adapter weights
|
||||
# Note: ranknum doesn't need to be included as it is not an nn.Module
|
||||
adapter_layer_names = ("lora_A", "lora_B", "lora_E", "lora_embedding_A", "lora_embedding_B")
|
||||
# other_param_names is defined in LoraLayer
|
||||
# All names of other parameters that may contain adapter-related parameters
|
||||
other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout", "ranknum")
|
||||
|
||||
def __init__(self, base_layer: nn.Module) -> None:
|
||||
super().__init__(base_layer)
|
||||
@ -72,16 +73,12 @@ class AdaLoraLayer(LoraLayer):
|
||||
if init_lora_weights:
|
||||
self.reset_lora_parameters(adapter_name)
|
||||
|
||||
if hasattr(self.get_base_layer(), "qweight"):
|
||||
# QuantLinear
|
||||
self.to(self.get_base_layer().qweight.device)
|
||||
else:
|
||||
self.to(self.get_base_layer().weight.device)
|
||||
self._move_adapter_to_device_of_base_layer(adapter_name)
|
||||
self.set_adapter(self.active_adapters)
|
||||
|
||||
def reset_lora_parameters(self, adapter_name):
|
||||
if adapter_name in self.lora_A.keys():
|
||||
nn.init.normal_(self.lora_E[adapter_name], mean=0.0, std=0.02)
|
||||
nn.init.zeros_(self.lora_E[adapter_name])
|
||||
nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02)
|
||||
nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02)
|
||||
|
||||
|
@ -42,15 +42,17 @@ class AdaLoraModel(LoraModel):
|
||||
model ([`transformers.PreTrainedModel`]): The model to be adapted.
|
||||
config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
|
||||
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
|
||||
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
|
||||
Create empty adapter weights on meta device. Useful to speed up the loading process.
|
||||
|
||||
Returns:
|
||||
`torch.nn.Module`: The AdaLora model.
|
||||
|
||||
Example::
|
||||
|
||||
>>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig
|
||||
>>> from transformers import AutoModelForSeq2SeqLM >>> from peft import LoraConfig, AdaLoraModel, AdaLoraConfig
|
||||
>>> config = AdaLoraConfig(
|
||||
peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"],
|
||||
peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", init_r=12, lora_alpha=32, target_modules=["q", "v"],
|
||||
lora_dropout=0.01,
|
||||
)
|
||||
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(model, config, "default")
|
||||
@ -229,6 +231,8 @@ class AdaLoraModel(LoraModel):
|
||||
try:
|
||||
return super().__getattr__(name) # defer to nn.Module's logic
|
||||
except AttributeError:
|
||||
if name == "model": # see #1892: prevent infinite recursion if class is not initialized
|
||||
raise
|
||||
return getattr(self.model, name)
|
||||
|
||||
def forward(self, *args, **kwargs):
|
||||
@ -349,3 +353,7 @@ class AdaLoraModel(LoraModel):
|
||||
# Pass the function and do forward propagation
|
||||
else:
|
||||
return None
|
||||
|
||||
def add_weighted_adapter(self, *args, **kwargs):
|
||||
"""This method is not supported for AdaLoRA, use LoRA instead."""
|
||||
raise TypeError(f"{self.__class__.__name__} does not support add_weighted_adapter method.")
|
||||
|
@ -158,4 +158,6 @@ class AdaptionPromptModel(nn.Module):
|
||||
except AttributeError:
|
||||
# This is necessary as e.g. causal models have various methods that we
|
||||
# don't want to re-implement here.
|
||||
if name == "model": # see #1892: prevent infinite recursion if class is not initialized
|
||||
raise
|
||||
return getattr(self.model, name)
|
||||
|
@ -20,41 +20,77 @@ from __future__ import annotations
|
||||
import math
|
||||
import os
|
||||
import warnings
|
||||
from contextlib import contextmanager
|
||||
from typing import Any, Optional, Union
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from torch.autograd import Function
|
||||
from torch.utils.cpp_extension import load
|
||||
|
||||
from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge
|
||||
|
||||
|
||||
os.environ["CC"] = "gcc"
|
||||
os.environ["CXX"] = "gcc"
|
||||
curr_dir = os.path.dirname(__file__)
|
||||
|
||||
_FBD_CUDA = None
|
||||
|
||||
|
||||
# adapted from accelerate; a try/finally is added so the environment is
# restored even when the managed block raises
@contextmanager
def patch_environment(**kwargs):
    """
    A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.

    Will convert the values in `kwargs` to strings and upper-case all the keys. Variables that already existed
    before entering are restored to their previous values on exit; variables that did not exist are removed.
    Restoration is guaranteed even if the body of the `with` block raises.

    Example:

    ```python
    >>> import os
    >>> from accelerate.utils import patch_environment

    >>> with patch_environment(FOO="bar"):
    ...     print(os.environ["FOO"])  # prints "bar"
    >>> print(os.environ["FOO"])  # raises KeyError
    ```
    """
    existing_vars = {}
    for key, value in kwargs.items():
        key = key.upper()
        if key in os.environ:
            # remember the previous value so it can be restored on exit
            existing_vars[key] = os.environ[key]
        os.environ[key] = str(value)

    try:
        yield
    finally:
        # always undo the patch, even if the managed block raised
        for key in kwargs:
            key = key.upper()
            if key in existing_vars:
                # restore previous value
                os.environ[key] = existing_vars[key]
            else:
                os.environ.pop(key, None)
|
||||
|
||||
|
||||
def get_fbd_cuda():
|
||||
global _FBD_CUDA
|
||||
|
||||
if _FBD_CUDA is not None:
|
||||
return _FBD_CUDA
|
||||
|
||||
# This import initializes cuda context and should thus be local, see issue 1877
|
||||
from torch.utils.cpp_extension import load
|
||||
|
||||
curr_dir = os.path.dirname(__file__)
|
||||
# need ninja to build the extension
|
||||
try:
|
||||
fbd_cuda = load(
|
||||
name="fbd_cuda",
|
||||
sources=[f"{curr_dir}/fbd/fbd_cuda.cpp", f"{curr_dir}/fbd/fbd_cuda_kernel.cu"],
|
||||
verbose=True,
|
||||
# build_directory='/tmp/' # for debugging
|
||||
)
|
||||
# extra_cuda_cflags = ['-std=c++14', '-ccbin=$$(which gcc-7)']) # cuda10.2 is not compatible with gcc9. Specify gcc 7
|
||||
import fbd_cuda
|
||||
with patch_environment(CC="gcc", CXX="gcc"):
|
||||
fbd_cuda = load(
|
||||
name="fbd_cuda",
|
||||
sources=[f"{curr_dir}/fbd/fbd_cuda.cpp", f"{curr_dir}/fbd/fbd_cuda_kernel.cu"],
|
||||
verbose=True,
|
||||
# build_directory='/tmp/' # for debugging
|
||||
)
|
||||
# extra_cuda_cflags = ['-std=c++14', '-ccbin=$$(which gcc-7)']) # cuda10.2 is not compatible with gcc9. Specify gcc 7
|
||||
except Exception as e:
|
||||
warnings.warn(f"Failed to load the CUDA extension: {e}, check if ninja is available.")
|
||||
warnings.warn("Setting boft_n_butterfly_factor to 1 to speed up the finetuning process.")
|
||||
@ -228,6 +264,14 @@ class BOFTLayer(BaseTunerLayer):
|
||||
"""
|
||||
Update the linear layer with trainable BOFT weights. Override for other layer types.
|
||||
"""
|
||||
# Attempt to load the CUDA extension during model initialization
|
||||
if not get_fbd_cuda():
|
||||
self.fbd_cuda_available = False
|
||||
# If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process
|
||||
boft_n_butterfly_factor = 1
|
||||
else:
|
||||
self.fbd_cuda_available = True
|
||||
|
||||
# to be consistent with the paper notation
|
||||
boft_n_butterfly_factor = boft_n_butterfly_factor - 1
|
||||
if boft_n_butterfly_factor < 0:
|
||||
@ -301,7 +345,7 @@ class BOFTLayer(BaseTunerLayer):
|
||||
perm_mat = self.perm2mat(perm)
|
||||
P[i] = perm_mat
|
||||
|
||||
self.register_buffer("boft_P", P)
|
||||
self.register_buffer("boft_P", P, persistent=False)
|
||||
|
||||
self.boft_R[adapter_name] = nn.Parameter(
|
||||
torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size)
|
||||
@ -310,18 +354,11 @@ class BOFTLayer(BaseTunerLayer):
|
||||
|
||||
self.reset_boft_parameters(adapter_name, init_weights)
|
||||
|
||||
weight = getattr(self, "weight", None)
|
||||
if weight is not None:
|
||||
# the layer is already completely initialized, this is an update
|
||||
if weight.dtype.is_floating_point or weight.dtype.is_complex:
|
||||
self.to(weight.device, dtype=weight.dtype)
|
||||
else:
|
||||
self.to(weight.device)
|
||||
|
||||
# set the boft block size and number
|
||||
self.boft_block_size[adapter_name] = boft_block_size
|
||||
self.boft_block_num[adapter_name] = boft_block_num
|
||||
|
||||
self._move_adapter_to_device_of_base_layer(adapter_name)
|
||||
self.set_adapter(self.active_adapters)
|
||||
|
||||
def reset_boft_parameters(self, adapter_name, init_weights):
|
||||
@ -441,14 +478,6 @@ class Linear(nn.Module, BOFTLayer):
|
||||
|
||||
self._active_adapter = adapter_name
|
||||
|
||||
# Attempt to load the CUDA extension during model initialization
|
||||
if not get_fbd_cuda():
|
||||
self.fbd_cuda_available = False
|
||||
# If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process
|
||||
boft_n_butterfly_factor = 1
|
||||
else:
|
||||
self.fbd_cuda_available = True
|
||||
|
||||
self.update_layer(
|
||||
adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
|
||||
)
|
||||
@ -490,7 +519,7 @@ class Linear(nn.Module, BOFTLayer):
|
||||
f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken"
|
||||
)
|
||||
|
||||
self.base_layer.weight.data = orig_weight
|
||||
self.base_layer.weight.data = orig_weight.contiguous()
|
||||
else:
|
||||
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
|
||||
orig_weight = base_layer.weight.data.clone()
|
||||
@ -499,7 +528,7 @@ class Linear(nn.Module, BOFTLayer):
|
||||
orig_weight = torch.transpose(orig_weight, 0, 1)
|
||||
orig_weight = orig_weight * boft_s
|
||||
|
||||
self.base_layer.weight.data = orig_weight
|
||||
self.base_layer.weight.data = orig_weight.contiguous()
|
||||
|
||||
self.merged_adapters.append(active_adapter)
|
||||
|
||||
@ -544,8 +573,9 @@ class Linear(nn.Module, BOFTLayer):
|
||||
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
|
||||
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
|
||||
|
||||
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, self.boft_P.permute(0, 2, 1))
|
||||
butterfly_oft_mat_batch = torch.bmm(self.boft_P, butterfly_oft_mat_batch)
|
||||
boft_P = self.boft_P.to(block_diagonal_butterfly.device)
|
||||
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
|
||||
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
|
||||
butterfly_oft_mat = butterfly_oft_mat_batch[0]
|
||||
|
||||
for i in range(1, butterfly_oft_mat_batch.shape[0]):
|
||||
@ -563,8 +593,8 @@ class Linear(nn.Module, BOFTLayer):
|
||||
elif self.merged:
|
||||
result = self.base_layer(x, *args, **kwargs)
|
||||
else:
|
||||
boft_rotation = torch.eye(self.in_features, device=x.device)
|
||||
boft_scale = torch.ones((int(self.out_features), 1), device=x.device)
|
||||
boft_rotation = torch.eye(self.in_features, device=x.device, dtype=previous_dtype)
|
||||
boft_scale = torch.ones((int(self.out_features), 1), device=x.device, dtype=previous_dtype)
|
||||
|
||||
for active_adapter in self.active_adapters:
|
||||
if active_adapter not in self.boft_R.keys():
|
||||
@ -585,8 +615,11 @@ class Linear(nn.Module, BOFTLayer):
|
||||
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
|
||||
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
|
||||
|
||||
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, self.boft_P.permute(0, 2, 1))
|
||||
butterfly_oft_mat_batch = torch.bmm(self.boft_P, butterfly_oft_mat_batch)
|
||||
# The BOFT author's cayley_batch, dropout and FastBlockDiag ONLY return fp32 outputs.
|
||||
boft_P = self.boft_P.to(x)
|
||||
block_diagonal_butterfly = block_diagonal_butterfly.to(x)
|
||||
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
|
||||
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
|
||||
butterfly_oft_mat = butterfly_oft_mat_batch[0]
|
||||
|
||||
for i in range(1, butterfly_oft_mat_batch.shape[0]):
|
||||
@ -599,11 +632,16 @@ class Linear(nn.Module, BOFTLayer):
|
||||
|
||||
orig_weight = self.get_base_layer().weight.data
|
||||
orig_weight = torch.transpose(orig_weight, 0, 1)
|
||||
boft_rotation = boft_rotation.to(previous_dtype)
|
||||
orig_weight = orig_weight.to(previous_dtype)
|
||||
rotated_weight = torch.mm(boft_rotation, orig_weight)
|
||||
rotated_weight = torch.transpose(rotated_weight, 0, 1)
|
||||
|
||||
scaled_rotated_weight = rotated_weight * boft_scale
|
||||
|
||||
scaled_rotated_weight = scaled_rotated_weight.to(previous_dtype)
|
||||
if self.base_layer.bias is not None:
|
||||
self.base_layer.bias = self.base_layer.bias.to(previous_dtype)
|
||||
result = F.linear(input=x, weight=scaled_rotated_weight, bias=self.base_layer.bias)
|
||||
|
||||
result = result.to(previous_dtype)
|
||||
@ -634,15 +672,6 @@ class Conv2d(nn.Module, BOFTLayer):
|
||||
BOFTLayer.__init__(self, base_layer)
|
||||
|
||||
self._active_adapter = adapter_name
|
||||
|
||||
# Attempt to load the CUDA extension during model initialization
|
||||
if not get_fbd_cuda():
|
||||
self.fbd_cuda_available = False
|
||||
# If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process
|
||||
boft_n_butterfly_factor = 1
|
||||
else:
|
||||
self.fbd_cuda_available = True
|
||||
|
||||
self.update_layer(
|
||||
adapter_name, boft_block_size, boft_block_num, boft_n_butterfly_factor, boft_dropout, init_weights
|
||||
)
|
||||
@ -653,6 +682,15 @@ class Conv2d(nn.Module, BOFTLayer):
|
||||
"""
|
||||
Update the conv2d layer with trainable BOFT weights.
|
||||
"""
|
||||
|
||||
# Attempt to load the CUDA extension during model initialization
|
||||
if not get_fbd_cuda():
|
||||
self.fbd_cuda_available = False
|
||||
# If the CUDA extension is not available, set the butterfly factor to 1 to speed up the finetuning process
|
||||
boft_n_butterfly_factor = 1
|
||||
else:
|
||||
self.fbd_cuda_available = True
|
||||
|
||||
# to be consistent with the paper notation
|
||||
boft_n_butterfly_factor = boft_n_butterfly_factor - 1
|
||||
if boft_n_butterfly_factor < 0:
|
||||
@ -733,7 +771,7 @@ class Conv2d(nn.Module, BOFTLayer):
|
||||
perm_mat = self.perm2mat(perm)
|
||||
P[i] = perm_mat
|
||||
|
||||
self.register_buffer("boft_P", P)
|
||||
self.register_buffer("boft_P", P, persistent=False)
|
||||
|
||||
self.boft_R[adapter_name] = nn.Parameter(
|
||||
torch.zeros(boft_n_butterfly_factor + 1, boft_block_num, boft_block_size, boft_block_size)
|
||||
@ -742,19 +780,13 @@ class Conv2d(nn.Module, BOFTLayer):
|
||||
|
||||
self.reset_boft_parameters(adapter_name, init_weights)
|
||||
|
||||
weight = getattr(self, "weight", None)
|
||||
if weight is not None:
|
||||
# the layer is already completely initialized, this is an update
|
||||
if weight.dtype.is_floating_point or weight.dtype.is_complex:
|
||||
self.to(weight.device, dtype=weight.dtype)
|
||||
else:
|
||||
self.to(weight.device)
|
||||
self.set_adapter(self.active_adapters)
|
||||
|
||||
# set the boft block size and number
|
||||
self.boft_block_size[adapter_name] = boft_block_size
|
||||
self.boft_block_num[adapter_name] = boft_block_num
|
||||
|
||||
self._move_adapter_to_device_of_base_layer(adapter_name)
|
||||
self.set_adapter(self.active_adapters)
|
||||
|
||||
def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None:
|
||||
"""
|
||||
Merge the active adapter weights into the base weights
|
||||
@ -791,7 +823,7 @@ class Conv2d(nn.Module, BOFTLayer):
|
||||
self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0]
|
||||
)
|
||||
|
||||
self.base_layer.weight.data = orig_weight
|
||||
self.base_layer.weight.data = orig_weight.contiguous()
|
||||
else:
|
||||
butterfly_oft_mat, boft_s = self.get_delta_weight(active_adapter)
|
||||
|
||||
@ -805,7 +837,7 @@ class Conv2d(nn.Module, BOFTLayer):
|
||||
self.out_features, self.in_features, base_layer.kernel_size[0], base_layer.kernel_size[0]
|
||||
)
|
||||
|
||||
self.base_layer.weight.data = orig_weight
|
||||
self.base_layer.weight.data = orig_weight.contiguous()
|
||||
|
||||
self.merged_adapters.append(active_adapter)
|
||||
|
||||
@ -860,8 +892,9 @@ class Conv2d(nn.Module, BOFTLayer):
|
||||
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
|
||||
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
|
||||
|
||||
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, self.boft_P.permute(0, 2, 1))
|
||||
butterfly_oft_mat_batch = torch.bmm(self.boft_P, butterfly_oft_mat_batch)
|
||||
boft_P = self.boft_P.to(block_diagonal_butterfly.device)
|
||||
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
|
||||
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
|
||||
butterfly_oft_mat = butterfly_oft_mat_batch[0]
|
||||
|
||||
for i in range(1, butterfly_oft_mat_batch.shape[0]):
|
||||
@ -880,9 +913,11 @@ class Conv2d(nn.Module, BOFTLayer):
|
||||
result = self.base_layer(x, *args, **kwargs)
|
||||
else:
|
||||
boft_rotation = torch.eye(
|
||||
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0], device=x.device
|
||||
self.in_features * self.base_layer.kernel_size[0] * self.base_layer.kernel_size[0],
|
||||
device=x.device,
|
||||
dtype=x.dtype,
|
||||
)
|
||||
boft_scale = torch.ones((1, int(self.out_features)), device=x.device)
|
||||
boft_scale = torch.ones((1, int(self.out_features)), device=x.device, dtype=x.dtype)
|
||||
|
||||
for active_adapter in self.active_adapters:
|
||||
if active_adapter not in self.boft_R.keys():
|
||||
@ -903,8 +938,10 @@ class Conv2d(nn.Module, BOFTLayer):
|
||||
block_diagonal_butterfly = torch.block_diag(*torch.unbind(orth_rotate_butterfly))
|
||||
block_diagonal_butterfly = block_diagonal_butterfly.unsqueeze(0)
|
||||
|
||||
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, self.boft_P.permute(0, 2, 1))
|
||||
butterfly_oft_mat_batch = torch.bmm(self.boft_P, butterfly_oft_mat_batch)
|
||||
boft_P = self.boft_P.to(x)
|
||||
block_diagonal_butterfly = block_diagonal_butterfly.to(x)
|
||||
butterfly_oft_mat_batch = torch.bmm(block_diagonal_butterfly, boft_P.permute(0, 2, 1))
|
||||
butterfly_oft_mat_batch = torch.bmm(boft_P, butterfly_oft_mat_batch)
|
||||
butterfly_oft_mat = butterfly_oft_mat_batch[0]
|
||||
|
||||
for i in range(1, butterfly_oft_mat_batch.shape[0]):
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user